提交 f044948c 编写于 作者: P Pengcheng Tang

Add ddboost storage unit option into gpcrondump, gpdbrestore and gpmfr

When a user dumps a database to a Data Domain Boost server, the storage
unit and backup directory must already be created and specified;
previously, the storage unit was hard-coded to "GPDB" and the user
had no option to use a different one.

This commit adds --ddboost-storage-unit option, which allows
user to dynamically specify storage unit for dump and restore.

This commit allows the user to have the storage unit information
statically saved in a configuration file on their cluster hosts.

This commit also adds the storage unit option to gpmfr for replicating
and recovering dump copies, in which case it uses an identical storage
unit and backup directory between the primary and secondary DDBoost servers.

--ddboost-storage-unit option takes higher priority than using
statically configured storage unit.

Authors:
Pengcheng Tang, Marbin Tan, Nikhil Kak
Lawrence Hamel, Stephen Wu, Chris Hajas, Chumki Roy
上级 afdc27f8
此差异已折叠。
......@@ -182,6 +182,11 @@ class GpdbRestore(Operation):
raise Exception(str(e))
self.ddboost = options.ddboost
self.ddboost_storage_unit = options.ddboost_storage_unit
if self.ddboost_storage_unit and not self.ddboost:
raise Exception('Cannot use ddboost storage unit option without specifying --ddboost option.')
if self.ddboost:
if self.netbackup_service_host:
raise Exception('--ddboost option is not supported with NetBackup')
......@@ -191,13 +196,15 @@ class GpdbRestore(Operation):
elif self.backup_dir:
raise ExceptionNoStackTraceNeeded('-u cannot be used with DDBoost parameters.')
dd = gpmfr.DDSystem("local")
self.dump_dir = dd.defaultBackupDir
self.dump_dir = dd.DDBackupDir
else:
self.dump_dir = 'db_dumps'
def execute(self):
if self.ddboost:
cmdline = 'gpddboost --sync --dir=%s' % self.master_datadir
if self.ddboost_storage_unit:
cmdline += ' --ddboost-storage-unit %s' % self.ddboost_storage_unit
cmd = Command('DDBoost sync', cmdline)
cmd.run(validateAfter=True)
......@@ -297,6 +304,7 @@ class GpdbRestore(Operation):
dump_dir = self.dump_dir,
dump_prefix = self.dump_prefix,
ddboost = self.ddboost,
ddboost_storage_unit = self.ddboost_storage_unit,
no_plan = self.no_plan,
restore_tables = self.restore_tables,
batch_default = self.batch_default,
......@@ -506,6 +514,7 @@ class GpdbRestore(Operation):
dump_prefix = self.dump_prefix,
compress = compress,
ddboost = self.ddboost,
ddboost_storage_unit = self.ddboost_storage_unit,
netbackup_service_host = self.netbackup_service_host,
remote_host = dump_host,
dump_file = dump_file).get_dump_tables()
......@@ -752,6 +761,9 @@ def create_parser():
# For DDBoost Restore
ddOpt = OptionGroup(parser, "DDBoost")
ddOpt.add_option('--ddboost', dest='ddboost', help="Dump to DDBoost using ~/.ddconfig", action="store_true", default=False)
ddOpt.add_option('--ddboost-storage-unit', dest='ddboost_storage_unit', default=None,
help="Storage unit where backup files are retrieved from the ddboost server")
parser.add_option_group(ddOpt)
return parser
......
......@@ -74,6 +74,8 @@ def mfr_parser():
ddOpt.add_option("--show-streams", dest="showStreams", action="store_true",
default=False, help="Show I/O streams used for DD Boost" +
" MFR on local and remote Data Domains.")
ddOpt.add_option("--ddboost-storage-unit", dest="ddboost_storage_unit", action="store", default=None,
help="Storage unit name in Data Domain server.")
parser.add_option_group(ddOpt)
optOpt = OptionGroup(parser, "Optional arguments used by operations")
optOpt.add_option("--remote", dest="remote", action="store_true",
......@@ -279,7 +281,7 @@ class DDSystem(object):
5028: "Failed to connect to DD System, check hostname/IP address."
}
def __init__(self, id):
def __init__(self, id, backup_dir=None, dd_storage_unit=None):
self.id = id
if id == "local":
self.argSuffix = ""
......@@ -290,9 +292,18 @@ class DDSystem(object):
raise NotImplemented(
"Only local and remote DD systems are supported currently.")
self._checkConfigExists(id)
self.defaultBackupDir = None
self.hostname = None
self._readConfig()
self.DDBackupDir = backup_dir
self.DDStorageUnit = dd_storage_unit
self.hostname, default_backup_dir, default_storage_unit = self._readConfig()
if not self.DDStorageUnit:
self.DDStorageUnit = default_storage_unit
if not self.DDBackupDir:
self.DDBackupDir = default_backup_dir
self._replSourceLimit = None
self._replDestLimit = None
......@@ -365,26 +376,43 @@ class DDSystem(object):
logger.error(msg + str(r))
raise Exception(msg)
def findItem(self, pattern, line):
    """Return the first capture group of *pattern* matched against *line*.

    Returns None when the compiled regex *pattern* does not match the
    beginning of *line* (``match`` semantics, not ``search``).
    """
    match = pattern.match(line)
    return match.group(1) if match else None
def _readConfig(self):
"""
Run gpddboost to get default backup dir and DD hostname.
"""
default_hostname = None
default_backup_dir = None
default_ddboost_storage_unit = None
phost = re.compile("Data Domain Hostname:([^ \t]*)")
pdir = re.compile("Default Backup Directory:([^ \t]*)")
pstu = re.compile("Data Domain Storage Unit:([^ \t]*)")
rc, lines = self._runDDBoost("--show-config")
if rc != 0:
code = self._parseError(lines)
self.printErrorAndAbort(code)
for l in lines:
m = pdir.match(l)
if m:
self.defaultBackupDir = m.group(1)
else:
m = phost.match(l)
if m:
self.hostname = m.group(1)
if self.hostname is None or self.hostname == "":
if not default_hostname:
default_hostname = self.findItem(phost, l)
if not default_backup_dir:
default_backup_dir = self.findItem(pdir, l)
if not default_ddboost_storage_unit:
default_ddboost_storage_unit = self.findItem(pstu, l)
if default_hostname is None or default_hostname == "":
raise Exception("Failed to obtain Data Domain configuration.")
if self.id == "remote":
# For remote Data Domain, default backup dir is not shown by
# gpddboost. Hence we employ the following ugly trick to get
......@@ -398,11 +426,18 @@ class DDSystem(object):
code = self._parseError(lines)
self.printErrorAndAbort(code)
for l in lines:
m = pdir.match(l)
if m:
self.defaultBackupDir = m.group(1)
if self.defaultBackupDir is None or self.defaultBackupDir == "":
raise Exception("Failed to obtain Data Domain configuration.")
if not default_backup_dir:
default_backup_dir = self.findItem(pdir, l)
if not default_ddboost_storage_unit:
default_ddboost_storage_unit = self.findItem(pstu, l)
if default_backup_dir is None or default_backup_dir == "":
raise Exception("Failed to obtain backup directory from Data Domain configuration.")
if default_ddboost_storage_unit is None or default_ddboost_storage_unit == "":
raise Exception("Failed to obtain storage unit from Data Domain configuration.")
return default_hostname, default_backup_dir, default_ddboost_storage_unit
def _ddBoostStreamCounts(self, regex):
"""
......@@ -426,7 +461,7 @@ class DDSystem(object):
Returns None if path not found on DD system. Otherwise returns listing
of files/directories, one per line.
"""
args = "--ls " + path
args = "--ls " + path + " --ddboost-storage-unit=" + self.DDStorageUnit
rc, lines = self._runDDBoost(args)
if rc != 0:
code = self._parseError(lines)
......@@ -436,7 +471,7 @@ class DDSystem(object):
return lines
def deleteFile(self, path):
args = "--del-file " + path
args = "--del-file " + path + " --ddboost-storage-unit=" + self.DDStorageUnit
rc, lines = self._runDDBoost(args)
if rc != 0:
code = self._parseError(lines)
......@@ -513,12 +548,12 @@ class DDSystem(object):
def verifyLogin(self):
    """
    "gpddboost --verify" connects to the DD system using the configured
    username and password. gpddboost also creates the storage unit on the
    DD system if one doesn't exist.
    """
    # NOTE(review): the stripped diff left both the pre- and post-change
    # lines in this method; only the post-change version (which passes the
    # configured storage unit) is kept here.
    rc, lines = self._runDDBoost("--verify --ddboost-storage-unit %s" % self.DDStorageUnit)
    if rc != 0:
        # Log the full gpddboost output before mapping it to an error code.
        logger.info("gpddboost --verify --ddboost-storage-unit %s: %s" % (self.DDStorageUnit, "\n".join(lines)))
        code = self._parseError(lines)
        self.printErrorAndAbort(code)
......@@ -633,7 +668,7 @@ class Scheduler(object):
"Cannot start %d workers with %d backup files." %
(start + count, len(self.backupSet.backupFiles)))
for bf in self.backupSet.backupFiles[start : start + count]:
bf.fullPath = "/".join([self.sourceDD.defaultBackupDir,
bf.fullPath = "/".join([self.sourceDD.DDBackupDir,
self.backupSet.backupDate, bf.name])
w = ReplicateWorker(bf, self.sourceDD, self.targetDD,
self.replCancelEv, self.replAbortEv)
......@@ -727,8 +762,8 @@ class Scheduler(object):
else:
for w in self.replWorkers:
w.join()
print "\nBackup '%s' transfered from %s to %s Data Domain." % \
(self.backupSet.bt, self.sourceDD, self.targetDD)
print ("\nBackup '%s' transferred from %s to %s Data Domain." %
(self.backupSet.bt, self.sourceDD, self.targetDD))
def delete(self):
"""
......@@ -768,7 +803,7 @@ class Scheduler(object):
batch = files[workDone : workDone + batchSize]
deleteWorkers = []
for f in batch:
f.fullPath = "/".join([ddSystem.defaultBackupDir,
f.fullPath = "/".join([ddSystem.DDBackupDir,
self.backupSet.backupDate, f.name])
dw = DeleteWorker(f, ddSystem)
dw.start()
......@@ -825,17 +860,22 @@ class ReplicateWorker(Thread):
def _ddBoostCmd(self):
    """Build the gpddboost command line for this replicate/recover worker.

    Chooses --replicate (local -> remote) or --recover (remote -> local)
    and picks the storage unit from the DD system that owns the operation:
    the source for replication, the target for recovery.
    """
    # TODO: This method needs to change in Phase II.
    cmd = DDBOOST_EXEC
    ddboost_storage_unit = None
    if self.sourceDD.id == "local" and self.targetDD.id == "remote":
        cmd = cmd + " --replicate"
        ddboost_storage_unit = self.sourceDD.DDStorageUnit
    elif self.sourceDD.id == "remote" and self.targetDD.id == "local":
        cmd = cmd + " --recover"
        ddboost_storage_unit = self.targetDD.DDStorageUnit
    else:
        msg = "Invalid DD system pair: source = %s target = %s" % \
            (self.sourceDD, self.targetDD)
        logger.error(msg)
        raise Exception(msg)
    # Fix: the stale pre-change "--to-file" line (a dangling string
    # expression left by the stripped diff) is removed; only the
    # post-change continuation that appends the storage unit remains.
    cmd = cmd + " --from-file " + self.backupFile.fullPath + \
        " --to-file " + self.backupFile.fullPath + \
        " --ddboost-storage-unit " + ddboost_storage_unit
    return cmd
def isInProgress(self):
......@@ -1161,10 +1201,10 @@ class GpMfr(Operation):
def execute(self):
if self.options.remote:
self.ddSystem = DDSystem("remote")
self.ddSystem = DDSystem("remote", dd_storage_unit=self.options.ddboost_storage_unit)
else:
self.ddSystem = DDSystem("local")
self.defaultDir = self.ddSystem.defaultBackupDir
self.ddSystem = DDSystem("local", dd_storage_unit=self.options.ddboost_storage_unit)
self.defaultDir = self.ddSystem.DDBackupDir
if self.options.ping:
self.checkDDReachable(self.ddSystem)
if not self.options.showStreams:
......@@ -1184,19 +1224,19 @@ class GpMfr(Operation):
self.deleteBackup(self.timestamp)
elif self.options.showStreams:
localDD = self.ddSystem
remoteDD = DDSystem("remote")
remoteDD = DDSystem("remote", dd_storage_unit=self.options.ddboost_storage_unit)
if self.options.ping:
self.checkDDReachable(remoteDD)
self.showDDBoostIOStreams(localDD, remoteDD)
elif self.options.replicate:
sourceDD = self.ddSystem
targetDD = DDSystem("remote")
targetDD = DDSystem("remote", dd_storage_unit=self.options.ddboost_storage_unit)
if self.options.ping:
self.checkDDReachable(targetDD)
self.replicateBackup(self.timestamp, sourceDD, targetDD,
int(self.options.maxStreams))
elif self.options.recover:
sourceDD = DDSystem("remote")
sourceDD = DDSystem("remote", dd_storage_unit=self.options.ddboost_storage_unit)
if self.options.ping:
self.checkDDReachable(sourceDD)
targetDD = self.ddSystem
......@@ -1441,8 +1481,7 @@ class GpMfr(Operation):
print "No valid backup set found on %s Data Domain." % sourceDD
return
else:
print "Identifying backup files on %s Data Domain." % \
sourceDD
print "Identifying backup files on %s Data Domain." % sourceDD
sourceBset = self._backupSet(bkptimestamp, sourceDD)
if not sourceBset:
print "Backup '%s' not found on %s Data Domain." % \
......@@ -1452,9 +1491,10 @@ class GpMfr(Operation):
msg = "Backup '%s' does not appear to be a valid backup set."
logger.error(msg % sourceBset.bt)
return
msg = "Initiating transfer for %d files from %s to %s " + \
"Data Domain."
msg = "Initiating transfer for %d files from %s to %s Data Domain."
print msg % (len(sourceBset.backupFiles), sourceDD, targetDD)
targetBset = self._backupSet(sourceBset.bt, targetDD)
if targetBset is not None:
deleteTarget = False
......@@ -1476,11 +1516,12 @@ class GpMfr(Operation):
quiet=self.options.quiet)
scheduler.delete()
else:
# Create "GPDB" storage unit on target Data Domain if one doesn't
# Create storage unit on target Data Domain if one doesn't
# exist.
targetDD.verifyLogin()
print "Using at the most %d I/O streams on each Data Domain." % \
maxStreams
print "Using at the most %d I/O streams on each Data Domain." % maxStreams
scheduler = Scheduler(sourceBset, sourceDD, targetDD,
self.options.quiet)
scheduler.replicate(maxStreams)
......
......@@ -169,23 +169,25 @@ def convert_reportfilename_to_cdatabasefilename(report_file, dump_prefix, ddboos
dirname = "%s/%s" % (dirname, timestamp[0:8])
return "%s/%sgp_cdatabase_1_1_%s" % (dirname, dump_prefix, timestamp)
def get_lines_from_dd_file(filename):
cmd = Command('DDBoost copy of master dump file',
'gpddboost --readFile --from-file=%s'
% (filename))
def get_lines_from_dd_file(filename, ddboost_storage_unit):
    """Read *filename* from the DDBoost server and return its lines.

    When *ddboost_storage_unit* is truthy it is forwarded to gpddboost;
    otherwise gpddboost falls back to its configured default unit.
    Raises if the gpddboost invocation fails (validateAfter=True).
    """
    pieces = ['gpddboost --readFile --from-file=%s' % filename]
    if ddboost_storage_unit:
        pieces.append('--ddboost-storage-unit=%s' % ddboost_storage_unit)
    copy_cmd = Command('DDBoost copy of master dump file', ' '.join(pieces))
    copy_cmd.run(validateAfter=True)
    return copy_cmd.get_results().stdout.splitlines()
def check_cdatabase_exists(dbname, report_file, dump_prefix, ddboost=False, netbackup_service_host=None, netbackup_block_size=None):
def check_cdatabase_exists(dbname, report_file, dump_prefix, ddboost=False, ddboost_storage_unit=None, netbackup_service_host=None, netbackup_block_size=None):
try:
filename = convert_reportfilename_to_cdatabasefilename(report_file, dump_prefix, ddboost)
except Exception:
return False
if ddboost:
cdatabase_contents = get_lines_from_dd_file(filename)
cdatabase_contents = get_lines_from_dd_file(filename, ddboost_storage_unit)
elif netbackup_service_host:
restore_file_with_nbu(netbackup_service_host, netbackup_block_size, filename)
cdatabase_contents = get_lines_from_file(filename)
......@@ -250,13 +252,13 @@ def get_all_occurrences(substr, line):
return None
return [m.start() for m in re.finditer('(?=%s)' % substr, line)]
def get_type_ts_from_report_file(dbname, report_file, backup_type, dump_prefix, ddboost=False, netbackup_service_host=None, netbackup_block_size=None):
def get_type_ts_from_report_file(dbname, report_file, backup_type, dump_prefix, ddboost=False, ddboost_storage_unit=None, netbackup_service_host=None, netbackup_block_size=None):
report_file_contents = get_lines_from_file(report_file)
if not check_successful_dump(report_file_contents):
return None
if not check_cdatabase_exists(dbname, report_file, dump_prefix, ddboost, netbackup_service_host, netbackup_block_size):
if not check_cdatabase_exists(dbname, report_file, dump_prefix, ddboost, ddboost_storage_unit, netbackup_service_host, netbackup_block_size):
return None
if check_backup_type(report_file_contents, backup_type):
......@@ -264,11 +266,11 @@ def get_type_ts_from_report_file(dbname, report_file, backup_type, dump_prefix,
return None
def get_full_ts_from_report_file(dbname, report_file, dump_prefix, ddboost=False, netbackup_service_host=None, netbackup_block_size=None):
return get_type_ts_from_report_file(dbname, report_file, 'Full', dump_prefix, ddboost, netbackup_service_host, netbackup_block_size)
def get_full_ts_from_report_file(dbname, report_file, dump_prefix, ddboost=False, ddboost_storage_unit=None, netbackup_service_host=None, netbackup_block_size=None):
    """Return the timestamp of a 'Full' dump recorded in *report_file*.

    Thin wrapper over get_type_ts_from_report_file with backup_type='Full';
    all other arguments are forwarded unchanged.
    """
    return get_type_ts_from_report_file(dbname, report_file, 'Full', dump_prefix,
                                        ddboost=ddboost,
                                        ddboost_storage_unit=ddboost_storage_unit,
                                        netbackup_service_host=netbackup_service_host,
                                        netbackup_block_size=netbackup_block_size)
def get_incremental_ts_from_report_file(dbname, report_file, dump_prefix, ddboost=False, netbackup_service_host=None, netbackup_block_size=None):
return get_type_ts_from_report_file(dbname, report_file, 'Incremental', dump_prefix, ddboost, netbackup_service_host, netbackup_block_size)
def get_incremental_ts_from_report_file(dbname, report_file, dump_prefix, ddboost=False, ddboost_storage_unit=None, netbackup_service_host=None, netbackup_block_size=None):
    """Return the timestamp of an 'Incremental' dump recorded in *report_file*.

    Thin wrapper over get_type_ts_from_report_file with
    backup_type='Incremental'; all other arguments are forwarded unchanged.
    """
    return get_type_ts_from_report_file(dbname, report_file, 'Incremental', dump_prefix,
                                        ddboost=ddboost,
                                        ddboost_storage_unit=ddboost_storage_unit,
                                        netbackup_service_host=netbackup_service_host,
                                        netbackup_block_size=netbackup_block_size)
def get_timestamp_val(report_file_contents):
for line in report_file_contents:
......@@ -302,13 +304,13 @@ def get_lines_from_zipped_file(fname):
fd.close()
return content
def get_lines_from_file(fname, ddboost=None):
def get_lines_from_file(fname, ddboost=None, ddboost_storage_unit=None):
"""
Don't strip white space here as it may be part of schema name and table name
"""
content = []
if ddboost:
contents = get_lines_from_dd_file(fname)
contents = get_lines_from_dd_file(fname, ddboost_storage_unit)
return contents
else:
with open(fname) as fd:
......@@ -551,7 +553,7 @@ def get_full_timestamp_for_incremental(master_datadir, dump_dir, dump_prefix, in
# backup_dir will be either MDD or some other directory depending on call
def get_latest_full_dump_timestamp(dbname, backup_dir, dump_dir, dump_prefix, ddboost=False):
def get_latest_full_dump_timestamp(dbname, backup_dir, dump_dir, dump_prefix, ddboost=False, ddboost_storage_unit=None):
if not backup_dir:
raise Exception('Invalid None param to get_latest_full_dump_timestamp')
......@@ -573,7 +575,7 @@ def get_latest_full_dump_timestamp(dbname, backup_dir, dump_dir, dump_prefix, dd
dump_report_files = sorted(dump_report_files, key=lambda x: int(x.split('_')[-1].split('.')[0]), reverse=True)
for dump_report_file in dump_report_files:
logger.debug('Checking for latest timestamp in report file %s' % os.path.join(dump_dir, dump_report_file))
timestamp = get_full_ts_from_report_file(dbname, os.path.join(dump_dir, dump_report_file), dump_prefix, ddboost)
timestamp = get_full_ts_from_report_file(dbname, os.path.join(dump_dir, dump_report_file), dump_prefix, ddboost, ddboost_storage_unit)
logger.debug('Timestamp = %s' % timestamp)
if timestamp is not None:
return timestamp
......
......@@ -400,7 +400,7 @@ def statistics_file_dumped(master_datadir, backup_dir, dump_dir, dump_prefix, re
return check_file_dumped_with_nbu(netbackup_service_host, statistics_filename)
def _build_gpdbrestore_cmd_line(ts, table_file, backup_dir, redirected_restore_db, report_status_dir, dump_prefix, ddboost=False, netbackup_service_host=None,
netbackup_block_size=None, change_schema=None, schema_level_restore_file=None):
netbackup_block_size=None, change_schema=None, schema_level_restore_file=None, ddboost_storage_unit=None):
cmd = 'gpdbrestore -t %s --table-file %s -a -v --noplan --noanalyze --noaostats --no-validate-table-name' % (ts, table_file)
if backup_dir is not None:
cmd += " -u %s" % backup_dir
......@@ -412,6 +412,8 @@ def _build_gpdbrestore_cmd_line(ts, table_file, backup_dir, redirected_restore_d
cmd += " --report-status-dir=%s" % report_status_dir
if ddboost:
cmd += " --ddboost"
if ddboost_storage_unit:
cmd += " --ddboost-storage-unit=%s" % ddboost_storage_unit
if netbackup_service_host:
cmd += " --netbackup-service-host=%s" % netbackup_service_host
if netbackup_block_size:
......@@ -469,7 +471,8 @@ class RestoreDatabase(Operation):
def __init__(self, restore_timestamp, no_analyze, drop_db, restore_global, master_datadir, backup_dir,
master_port, dump_dir, dump_prefix, no_plan, restore_tables, batch_default, no_ao_stats,
redirected_restore_db, report_status_dir, restore_stats, metadata_only, ddboost,
netbackup_service_host, netbackup_block_size, change_schema, schema_level_restore_list):
netbackup_service_host, netbackup_block_size, change_schema, schema_level_restore_list,
ddboost_storage_unit=None):
self.restore_timestamp = restore_timestamp
self.no_analyze = no_analyze
self.drop_db = drop_db
......@@ -488,6 +491,7 @@ class RestoreDatabase(Operation):
self.restore_stats = restore_stats
self.metadata_only = metadata_only
self.ddboost = ddboost
self.ddboost_storage_unit = ddboost_storage_unit
self.netbackup_service_host = netbackup_service_host
self.netbackup_block_size = netbackup_block_size
self.change_schema = change_schema
......@@ -764,7 +768,8 @@ class RestoreDatabase(Operation):
self.redirected_restore_db,
self.report_status_dir, self.dump_prefix,
self.ddboost, self.netbackup_service_host,
self.netbackup_block_size, self.change_schema)
self.netbackup_block_size, self.change_schema,
self.ddboost_storage_unit)
logger.info('Invoking commandline: %s' % cmd)
Command('Invoking gpdbrestore', cmd).run(validateAfter=True)
table_files.append(table_file)
......@@ -981,6 +986,8 @@ class RestoreDatabase(Operation):
restore_line += " --gp-nostats"
if self.ddboost:
restore_line += " --ddboost"
if self.ddboost_storage_unit:
restore_line += " --ddboost-storage-unit=%s" % self.ddboost_storage_unit
if self.netbackup_service_host:
restore_line += " --netbackup-service-host=%s" % self.netbackup_service_host
if self.netbackup_block_size:
......@@ -1030,6 +1037,8 @@ class RestoreDatabase(Operation):
if self.ddboost:
restore_line += " --ddboost"
if self.ddboost_storage_unit:
restore_line += " --ddboost-storage-unit=%s" % self.ddboost_storage_unit
if self.netbackup_service_host:
restore_line += " --netbackup-service-host=%s" % self.netbackup_service_host
if self.netbackup_block_size:
......@@ -1069,6 +1078,8 @@ class RestoreDatabase(Operation):
if self.ddboost:
restore_line += " --ddboost"
if self.ddboost_storage_unit:
restore_line += " --ddboost-storage-unit=%s" % self.ddboost_storage_unit
if self.netbackup_service_host:
restore_line += " --netbackup-service-host=%s" % self.netbackup_service_host
if self.netbackup_block_size:
......@@ -1432,13 +1443,17 @@ class GetDumpTablesOperation(Operation):
return ret
class GetDDboostDumpTablesOperation(GetDumpTablesOperation):
def __init__(self, restore_timestamp, master_datadir, backup_dir, dump_dir, dump_prefix, compress, dump_file):
def __init__(self, restore_timestamp, master_datadir, backup_dir, dump_dir, dump_prefix, compress, dump_file, ddboost_storage_unit=None):
self.dump_file = dump_file
self.ddboost_storage_unit = ddboost_storage_unit
super(GetDDboostDumpTablesOperation, self).__init__(restore_timestamp, master_datadir, backup_dir, dump_dir, dump_prefix, compress)
def execute(self):
ddboost_cmdStr = 'gpddboost --readFile --from-file=%s' % self.dump_file
if self.ddboost_storage_unit:
ddboost_cmdStr += ' --ddboost-storage-unit=%s' % self.ddboost_storage_unit
cmdStr = ddboost_cmdStr + self.gunzip_maybe + self.grep_cmdStr
cmd = Command('DDBoost copy of master dump file', cmdStr)
......@@ -1508,7 +1523,7 @@ class GetDumpTables():
def __init__(self, restore_timestamp, master_datadir, backup_dir,
dump_dir, dump_prefix, compress, ddboost,
netbackup_service_host, remote_host=None,
dump_file=None):
dump_file=None, ddboost_storage_unit=None):
"""
backup_dir: user specified backup directory, using -u option
dump_dir: dump directory name, e.g. ddboost default dump directory
......@@ -1526,11 +1541,12 @@ class GetDumpTables():
self.netbackup_service_host = netbackup_service_host
self.remote_hostname = remote_host
self.dump_file = dump_file
self.ddboost_storage_unit = ddboost_storage_unit
def get_dump_tables(self):
if self.ddboost:
get_dump_table_cmd = GetDDboostDumpTablesOperation(self.restore_timestamp, self.master_datadir, self.backup_dir,
self.dump_dir, self.dump_prefix, self.compress, self.dump_file)
self.dump_dir, self.dump_prefix, self.compress, self.dump_file, self.ddboost_storage_unit)
elif self.netbackup_service_host:
get_dump_table_cmd = GetNetBackupDumpTablesOperation(self.restore_timestamp, self.master_datadir, self.backup_dir, self.dump_dir,
self.dump_prefix, self.compress, self.netbackup_service_host, self.dump_file)
......
......@@ -22,7 +22,7 @@ from gppylib.operations.dump import DumpDatabase, DumpGlobal, compare_dict, crea
backup_global_file_with_nbu, backup_config_files_with_nbu, backup_report_file_with_ddboost, \
backup_increments_file_with_ddboost, copy_file_to_dd, backup_dirty_file_with_nbu, backup_increments_file_with_nbu, \
backup_partition_list_file_with_nbu, get_include_schema_list_from_exclude_schema, backup_schema_file_with_ddboost, \
update_filter_file_with_dirty_list, TIMESTAMP, TIMESTAMP_KEY, DUMP_DATE
update_filter_file_with_dirty_list, TIMESTAMP, TIMESTAMP_KEY, DUMP_DATE, DeleteCurrentDump, DeleteOldestDumps
from mock import patch, MagicMock, Mock
class DumpTestCase(unittest.TestCase):
......@@ -186,11 +186,11 @@ class DumpTestCase(unittest.TestCase):
CreateIncrementsFile.validate_increments_file('testdb', '/tmp/fn', '/data', None, None, None)
def test08_CreateIncrementsFile_init(self):
obj = CreateIncrementsFile('testdb', '20121225000000', '20121226000000', '/data', None, self.dumper.dump_dir, self.dumper.dump_prefix, False, None, None)
obj = CreateIncrementsFile('testdb', '20121225000000', '20121226000000', '/data', None, self.dumper.dump_dir, self.dumper.dump_prefix, False, None, None, None)
self.assertEquals(obj.increments_filename, '/data/db_dumps/20121225/gp_dump_20121225000000_increments')
def test09_CreateIncrementsFile_execute(self):
obj = CreateIncrementsFile('testdb', '20121225000000', '20121226000000', '/data', None, self.dumper.dump_dir, self.dumper.dump_prefix, False, None, None)
obj = CreateIncrementsFile('testdb', '20121225000000', '20121226000000', '/data', None, self.dumper.dump_dir, self.dumper.dump_prefix, False, None, None, None)
obj.increments_filename = os.path.join(os.getcwd(), 'test.increments')
if os.path.isfile(obj.increments_filename):
os.remove(obj.increments_filename)
......@@ -199,7 +199,7 @@ class DumpTestCase(unittest.TestCase):
os.remove(obj.increments_filename)
def test10_CreateIncrementsFile_execute(self):
obj = CreateIncrementsFile('testdb', '20121225000000', '20121226000000', '/data', None, self.dumper.dump_dir, self.dumper.dump_prefix, False, None, None)
obj = CreateIncrementsFile('testdb', '20121225000000', '20121226000000', '/data', None, self.dumper.dump_dir, self.dumper.dump_prefix, False, None, None, None)
obj.increments_filename = os.path.join(os.getcwd(), 'test.increments')
with open(obj.increments_filename, 'w') as fd:
fd.write('20121225100000')
......@@ -209,7 +209,7 @@ class DumpTestCase(unittest.TestCase):
@patch('gppylib.operations.dump.CreateIncrementsFile.validate_increments_file')
def test11_CreateIncrementsFile_execute(self, mock1):
obj = CreateIncrementsFile('testdb', '20121225000000', '20121226000000', '/data', None, self.dumper.dump_dir, self.dumper.dump_prefix, False, None, None)
obj = CreateIncrementsFile('testdb', '20121225000000', '20121226000000', '/data', None, self.dumper.dump_dir, self.dumper.dump_prefix, False, None, None, None)
obj.increments_filename = os.path.join(os.getcwd(), 'test.increments')
with open(obj.increments_filename, 'w') as fd:
fd.write('20121225100000\n')
......@@ -219,7 +219,7 @@ class DumpTestCase(unittest.TestCase):
@patch('gppylib.operations.dump.get_lines_from_file', return_value=[])
def test12_CreateIncrementsFile_execute(self, mock1):
obj = CreateIncrementsFile('testdb', '20121225000000', '20121226000000', '/data', None, self.dumper.dump_dir, self.dumper.dump_prefix, False, None, None)
obj = CreateIncrementsFile('testdb', '20121225000000', '20121226000000', '/data', None, self.dumper.dump_dir, self.dumper.dump_prefix, False, None, None, None)
obj.increments_filename = os.path.join(os.getcwd(), 'test.increments')
with self.assertRaisesRegexp(Exception, 'File not written to'):
result = obj.execute()
......@@ -228,7 +228,7 @@ class DumpTestCase(unittest.TestCase):
@patch('gppylib.operations.dump.get_lines_from_file', return_value=['20121225100000'])
@patch('gppylib.operations.dump.CreateIncrementsFile.validate_increments_file')
def test13_CreateIncrementsFile_execute(self, mock1, mock2):
obj = CreateIncrementsFile('testdb', '20121225000000', '20121226000000', '/data', None, self.dumper.dump_dir, self.dumper.dump_prefix, False, None, None)
obj = CreateIncrementsFile('testdb', '20121225000000', '20121226000000', '/data', None, self.dumper.dump_dir, self.dumper.dump_prefix, False, None, None, None)
obj.increments_filename = os.path.join(os.getcwd(), 'test.increments')
with open(obj.increments_filename, 'w') as fd:
fd.write('20121225100000\n')
......@@ -239,7 +239,7 @@ class DumpTestCase(unittest.TestCase):
@patch('gppylib.operations.dump.get_lines_from_file', return_value=['20121225100001', '20121226000000'])
@patch('gppylib.operations.dump.CreateIncrementsFile.validate_increments_file')
def test14_CreateIncrementsFile_execute(self, mock1, mock2):
obj = CreateIncrementsFile('testdb', '20121225000000', '20121226000000', '/data', None, self.dumper.dump_dir, self.dumper.dump_prefix, False, None, None)
obj = CreateIncrementsFile('testdb', '20121225000000', '20121226000000', '/data', None, self.dumper.dump_dir, self.dumper.dump_prefix, False, None, None, None)
obj.increments_filename = os.path.join(os.getcwd(), 'test.increments')
with open(obj.increments_filename, 'w') as fd:
fd.write('20121225100000\n')
......@@ -1057,8 +1057,10 @@ class DumpTestCase(unittest.TestCase):
dump_database = 'testdb'
netbackup_service_host = "mdw"
netbackup_block_size = "1024"
ddboost = False
storage_unit = None
expected_output = '/foo/db_dumps/20130101/metro_gp_dump_20130101010101_filter'
self.assertEquals(expected_output, get_filter_file(dump_database, master_datadir, backup_dir, self.dumper.dump_dir, dump_prefix, netbackup_service_host, netbackup_block_size))
self.assertEquals(expected_output, get_filter_file(dump_database, master_datadir, backup_dir, self.dumper.dump_dir, dump_prefix, ddboost, storage_unit, netbackup_service_host, netbackup_block_size))
@patch('gppylib.operations.dump.get_latest_full_dump_timestamp', return_value='20130101010101')
@patch('os.path.isfile', return_value=False)
......@@ -1143,9 +1145,11 @@ class DumpTestCase(unittest.TestCase):
dump_database = 'testdb'
netbackup_service_host = "mdw"
netbackup_block_size = "1024"
ddboost = False
storage_unit = None
dirty_tables = ['public.t1', 'public.t2', 'pepper.t1', 'pepper.t2']
expected_output = ['public.t1', 'pepper.t2']
self.assertEquals(sorted(expected_output), sorted(filter_dirty_tables(dirty_tables, dump_database, master_datadir, backup_dir, self.dumper.dump_dir, dump_prefix, netbackup_service_host, netbackup_block_size)))
self.assertEquals(sorted(expected_output), sorted(filter_dirty_tables(dirty_tables, dump_database, master_datadir, backup_dir, self.dumper.dump_dir, dump_prefix, ddboost, storage_unit, netbackup_service_host, netbackup_block_size)))
@patch('gppylib.operations.dump.get_lines_from_file', return_value=['public.t1', 'pepper.t2'])
@patch('gppylib.operations.dump.get_latest_full_dump_timestamp', return_value='20130101010101')
......@@ -1185,10 +1189,11 @@ class DumpGlobalTestCase(unittest.TestCase):
backup_dir='/foo',
dump_dir='db_dumps',
dump_prefix='',
ddboost=False)
ddboost=False,
ddboost_storage_unit=None)
def test_create_pgdump_command_line(self):
self.dumper = DumpGlobal(timestamp=TIMESTAMP_KEY, master_datadir='/foo', master_port=9000, backup_dir='/foo', dump_dir='db_dumps', dump_prefix='', ddboost=False)
self.dumper = DumpGlobal(timestamp=TIMESTAMP_KEY, master_datadir='/foo', master_port=9000, backup_dir='/foo', dump_dir='db_dumps', dump_prefix='', ddboost=False, ddboost_storage_unit=None)
global_file_name = '/foo/db_dumps/%s/gp_global_1_1_%s' % (DUMP_DATE, TIMESTAMP_KEY)
expected_output = "pg_dumpall -p 9000 -g --gp-syntax > %s" % global_file_name
......@@ -2850,5 +2855,56 @@ class MailEventTestCase(unittest.TestCase):
m = MailEvent(subject="test", message="Hello", to_addrs="example@gopivotal.com")
m.execute()
class DeleteCurrentDumpTestCase(unittest.TestCase):
    # Verifies DeleteCurrentDump forwards the dynamically supplied DDBoost
    # storage unit ('TEMP') to every gpddboost command it issues.
    @patch('gppylib.operations.dump.dbconn.DbURL')
    @patch('gppylib.operations.dump.DeleteCurrentSegDump.run', return_value=None)
    @patch('gppylib.operations.dump.GpArray.initFromCatalog')
    @patch('gppylib.operations.dump.GpArray.getDbList', return_value=[])
    @patch('gppylib.commands.base.Command.__init__', return_value=None)
    @patch('gppylib.commands.base.Command.run', return_value=None)
    @patch('gppylib.commands.base.Command.get_results')
    def test_delete_from_dynamic_ddboost_option(self, m1, m2, m3, m4, m5, m6, m7):
        # Decorators apply bottom-up: m1 mocks Command.get_results,
        # m2 Command.run, m3 Command.__init__.
        timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
        dump_file = 'gp_cdatabase_1_1_%s' % timestamp
        # Make the mocked directory listing report exactly one dump file.
        m1.return_value.stdout = dump_file
        port = 5432
        dump_dir = 'MFR_TINC'
        ddboost = True
        storage_unit = 'TEMP'
        DeleteCurrentDump(timestamp, None, port, dump_dir, ddboost, storage_unit).run()
        # m3 captures every Command(...) construction; both the listing and
        # the per-file delete must carry --ddboost-storage-unit=TEMP.
        m3.assert_any_call('DDBoost list dump files', 'gpddboost --listDirectory --dir=MFR_TINC/%s --ddboost-storage-unit=TEMP' % DUMP_DATE)
        m3.assert_any_call('DDBoost delete of %s/%s/%s' % (dump_dir, DUMP_DATE, dump_file), 'gpddboost --del-file=%s/%s/%s --ddboost-storage-unit=TEMP' % (dump_dir, DUMP_DATE, dump_file))
class DeleteOldestDumpsTestCase(unittest.TestCase):
    """Verifies that DeleteOldestDumps passes a dynamically supplied DDBoost
    storage unit through to both the listing and the cleanup command lines."""

    @patch('gppylib.operations.dump.dbconn.DbURL')
    @patch('gppylib.operations.dump.DeleteCurrentSegDump.run', return_value=None)
    @patch('gppylib.operations.dump.GpArray.initFromCatalog')
    @patch('gppylib.operations.dump.GpArray.getDbList', return_value=[])
    @patch('gppylib.commands.base.Command.__init__', return_value=None)
    @patch('gppylib.commands.base.Command.run', return_value=None)
    @patch('gppylib.commands.base.Command.get_results')
    def test_delete_old_from_dynamic_ddboost_option(self, m1, m2, m3, m4, m5, m6, m7):
        # m1 mocks Command.get_results (the directory listing); m3 mocks
        # Command.__init__, so the (name, cmdStr) pairs land in m3's history.
        m1.return_value.stdout = '20160101'
        m1.return_value.rc = 0
        port = 5432
        cleanup_date = '20160101'
        cleanup_total = 1
        dump_dir = 'MFR_TINC'
        ddboost = True
        storage_unit = 'TEMP'

        DeleteOldestDumps(None, port, dump_dir, cleanup_date, cleanup_total, ddboost, storage_unit).run()

        # Note: previously this test also dumped m3.call_args_list to
        # /tmp/log — leftover debugging code, now removed.
        m3.assert_any_call('List directories in DDBoost db_dumps dir',
                           'gpddboost --ddboost-storage-unit %s --listDir --dir=MFR_TINC/ | grep ^[0-9] ' % (storage_unit))
        m3.assert_any_call('DDBoost cleanup',
                           'gpddboost --del-dir=%s/%s --ddboost-storage-unit %s' % (dump_dir, cleanup_date, storage_unit))
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
......@@ -66,6 +66,7 @@ class GpCronDumpTestCase(unittest.TestCase):
## Enterprise init
self.incremental = False
self.ddboost = False
self.ddboost_storage_unit = None
self.ddboost_hosts = None
self.ddboost_user = None
self.ddboost_config_remove = False
......@@ -661,6 +662,17 @@ class GpCronDumpTestCase(unittest.TestCase):
with self.assertRaisesRegexp(Exception, '--ddboost is not supported with NetBackup'):
GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
def test_options_ddboost_storage_unit_should_be_used_with_ddboost(self, mock):
"""
--ddboost-storage-unit option must come with --ddboost option
"""
options = GpCronDumpTestCase.Options()
options.dump_databases = ['bkdb']
options.ddboost_storage_unit = "GPDB"
with self.assertRaisesRegexp(Exception, 'Must specify --ddboost option together with the --ddboost-storage-unit'):
GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_get_include_exclude_for_dump_database00(self, mock1, mock2):
......
import imp
import os
import io
from threading import Event
from mock import *
from gp_unittest import *
from gppylib.commands.base import CommandResult
# Canned DD Boost configuration text, fed to the mocked
# Command.get_results as stdout in the tests below.  The remote variant
# carries no backup directory / storage unit fields.
DDBOOST_CONFIG_REMOTE_INFO= """
Data Domain Hostname:ddremote
Data Domain Boost Username:metro
Data Domain default log level:WARNING
Data Domain default log size:50
"""

# Local variant: includes the statically configured default backup
# directory and storage unit that gpmfr should pick up when no
# --ddboost-storage-unit override is given.
DDBOOST_CONFIG_INFO= """
Data Domain Hostname:ddlocal
Data Domain Boost Username:metro
Default Backup Directory:MY_BACKUP_DIR
Data Domain Storage Unit:MY_STORAGE_UNIT_NAME
Data Domain default log level:WARNING
Data Domain default log size:50
"""
class GpMfrTestCase(GpTestCase):
    """Unit tests for gpmfr's DD Boost command construction.

    gpmfr.py is a script rather than an importable module, so setUp loads it
    with imp.load_source.  All external effects (gpsubprocess.Popen and the
    Command class) are replaced with mocks; the tests only assert on the
    gpddboost command lines gpmfr builds, in particular that the storage
    unit from the DD Boost config (or an explicit override) is propagated.
    """

    def setUp(self):
        # load subject after setting env vars
        gpmfr_path = os.path.abspath(os.path.dirname(__file__) + "/../../../gpmfr.py")
        # GOOD_MOCK_EXAMPLE of environment variables: GPHOME determines the
        # 'foo/bin/gpddboost' prefix asserted below.
        with patch.dict('os.environ', values = {'GPHOME': 'foo'}):
            self.subject = imp.load_source('gpmfr', gpmfr_path)
        self.subject.logger = Mock(spec=['log', 'info', 'debug', 'error', 'warn'])
        # Stand-in for the Popen object returned by gpsubprocess.Popen;
        # readline/communicate output mimics a successful ping/transfer.
        self.popen = Mock()
        self.popen.pid = 3
        self.popen.stdout.return_value = Mock()
        self.popen.stdout.readline.return_value = "5 packets received 0% packet loss"
        self.popen.communicate.return_value = ('foo', "")
        self.popen_class = Mock(return_value=self.popen)
        # commands return CommandResults; Command.__init__ is mocked so the
        # (name, cmdStr) pairs can be asserted via self.mock_init.
        self.mock_init = Mock(return_value=None)
        self.apply_patches([
            patch("gpmfr.gpsubprocess.Popen", self.popen_class),
            patch('gpmfr.Command.__init__', self.mock_init),
            patch('gpmfr.Command.was_successful', return_value=True),
            patch('gpmfr.Command.run', return_value=None),
        ])

    @patch('gppylib.commands.base.Command.get_results', return_value=CommandResult(0, DDBOOST_CONFIG_INFO, "", True, False))
    def test_listBackups_withDDBoost_should_issue_correct_command(self, mock_results):
        # --list must carry the storage unit read from the DD Boost config.
        p = self.subject.mfr_parser()
        mfropt, mfrargs = p.parse_args(['--list'], None)
        mfr = self.subject.GpMfr(mfropt, mfrargs)
        mfr.run()
        self.mock_init.assert_any_call('DD Boost on master', 'foo/bin/gpddboost --ls MY_BACKUP_DIR --ddboost-storage-unit=MY_STORAGE_UNIT_NAME' )

    @patch('gppylib.commands.base.Command.get_results', return_value=CommandResult(0, DDBOOST_CONFIG_INFO, "", True, False))
    def test_verify_login_local_ddsystem_issue_correct_command(self, mock_results):
        # Explicit storage-unit override on the local DD system.
        ddsystem = self.subject.DDSystem('local', dd_storage_unit='MY_STORAGE_UNIT_NAME')
        ddsystem.verifyLogin()
        self.mock_init.assert_any_call('DD Boost on master', 'foo/bin/gpddboost --verify --ddboost-storage-unit MY_STORAGE_UNIT_NAME' )

    @patch('gppylib.commands.base.Command.get_results', return_value=CommandResult(0, DDBOOST_CONFIG_INFO, "", True, False))
    def test_verify_login_remote_ddsystem_issue_correct_command(self, mock_results):
        # Same as above but targeting the remote DD system (--remote flag).
        ddsystem = self.subject.DDSystem('remote', dd_storage_unit='MY_STORAGE_UNIT_NAME')
        ddsystem.verifyLogin()
        self.mock_init.assert_any_call('DD Boost on master', 'foo/bin/gpddboost --verify --ddboost-storage-unit MY_STORAGE_UNIT_NAME --remote' )

    @patch('gppylib.commands.base.Command.get_results', return_value=CommandResult(0, DDBOOST_CONFIG_INFO, "", True, False))
    def test_delete_file_on_ddsystem_issue_correct_command(self, mock_results):
        # deleteFile must append the storage unit to the --del-file command.
        ddsystem = self.subject.DDSystem('local', dd_storage_unit='MY_STORAGE_UNIT_NAME')
        path = 'foo/20160101/gp_dump_1_1_20160101122346.gz'
        ddsystem.deleteFile(path)
        self.mock_init.assert_any_call('DD Boost on master', 'foo/bin/gpddboost --del-file foo/20160101/gp_dump_1_1_20160101122346.gz --ddboost-storage-unit=MY_STORAGE_UNIT_NAME' )

    # The side_effect list below is consumed in order by successive
    # Command.get_results calls made during GpMfr.run(): local/remote config
    # reads, the file listing, stream-count queries, and replication progress.
    @patch('gppylib.commands.base.Command.get_results', side_effect=[
        CommandResult(0, DDBOOST_CONFIG_INFO, "", True, False),
        CommandResult(0, DDBOOST_CONFIG_REMOTE_INFO, "", True, False),
        CommandResult(0, DDBOOST_CONFIG_INFO, "", True, False),
        CommandResult(0, "gp_dump_1_1_20160101122346.gz 600 691", "", True, False),
        CommandResult(0, DDBOOST_CONFIG_REMOTE_INFO, "", True, False),
        CommandResult(0, "Replication Source Streams : 75", "", True, False),
        CommandResult(0, "Replication Source Streams : 70", "", True, False),
        CommandResult(0, "Replication Destination Streams : 80", "", True, False),
        CommandResult(0, "Used Filecopy Streams : 0", "", True, False),
        CommandResult(0, "Used Filecopy Streams : 0", "", True, False),
        CommandResult(0, "Replication gp_dump_1_1_20160101122346.gz completed 100 percent 691 bytes", "", True, False),
        CommandResult(0, "Used Filecopy Streams : 0", "", True, False),
    ])
    def test_replicate_on_ddsystem_issues_correct_message(self, mock_results):
        # ReplicateWorker is mocked as already-finished so only the
        # user-facing progress messages printed by GpMfr are exercised.
        with patch('gpmfr.ReplicateWorker') as mock_rep_class:
            mock_worker = Mock()
            mock_worker.isFailed.return_value = False
            mock_worker.isFinished.return_value = True
            mock_worker.bytesSent = 691
            mock_rep_class.return_value = mock_worker
            p = self.subject.mfr_parser()
            mfropt, mfrargs = p.parse_args(['--replicate', '20160101122346', '--max-streams', 60, '--master-port', '5432'], None)
            mfr = self.subject.GpMfr(mfropt, mfrargs)
            expected_message1 = 'Initiating transfer for 1 files from local(ddlocal) to remote(ddremote) Data Domain.'
            expected_message2 = 'Backup \'2016-January-01 12:23:46 (20160101122346)\' transferred from local(ddlocal) to remote(ddremote) Data Domain.'
            # GOOD_MOCK_EXAMPLE of stdout
            with patch('sys.stdout', new=io.BytesIO()) as mock_stdout:
                mfr.run()
            self.assertIn(expected_message1, mock_stdout.getvalue())
            self.assertIn(expected_message2, mock_stdout.getvalue())

    @patch('gpmfr.Thread')
    @patch('gppylib.commands.base.Command.get_results', return_value=CommandResult(0, DDBOOST_CONFIG_INFO, "", True, False))
    def test_replicate_worker_issues_correct_command_for_replicate(self, mock_results, mock_thread_class):
        # Worker replicating local -> remote uses the --replicate verb and
        # the source system's storage unit.
        bfile, remoteDD, sourceDD = self.__setup_DDBoost_info()
        self.popen.stdout.readline.side_effect = [""]
        worker = self.subject.ReplicateWorker(bfile, sourceDD, remoteDD, Event(), Event())
        worker.run()
        self.popen_class.assert_any_call(['foo/bin/gpddboost', '--replicate', '--from-file', 'localBackupDir/20160421/foo.txt', '--to-file', 'localBackupDir/20160421/foo.txt', '--ddboost-storage-unit', 'localStorageUnit'], stderr=-2, stdout=-1)

    @patch('gpmfr.Thread')
    @patch('gppylib.commands.base.Command.get_results', return_value=CommandResult(0, DDBOOST_CONFIG_INFO, "", True, False))
    def test_replicate_worker_issues_correct_command_for_recover(self, mock_results, mock_thread_class):
        # Swapping source and destination systems switches the verb to
        # --recover; the storage unit argument is still appended.
        bfile, remoteDD, sourceDD = self.__setup_DDBoost_info()
        self.popen.stdout.readline.side_effect = [""]
        worker = self.subject.ReplicateWorker(bfile, remoteDD, sourceDD, Event(), Event())
        worker.run()
        self.popen_class.assert_any_call(['foo/bin/gpddboost', '--recover', '--from-file', 'localBackupDir/20160421/foo.txt', '--to-file', 'localBackupDir/20160421/foo.txt', '--ddboost-storage-unit', 'localStorageUnit'], stderr=-2, stdout=-1)

    def __setup_DDBoost_info(self):
        # Shared fixture: a local and a remote DD system plus one backup
        # file anchored under the local system's backup directory.
        sourceDD = self.subject.DDSystem('local', 'localBackupDir', 'localStorageUnit')
        remoteDD = self.subject.DDSystem('remote', 'remoteBackupDir', 'remoteStorageUnit')
        bfile = self.subject.BackupFile("foo.txt", 755, 1234)
        bfile.fullPath = "/".join([sourceDD.DDBackupDir, '20160421', bfile.name])
        return bfile, remoteDD, sourceDD
# Allow running this test module directly via the project's test runner.
if __name__ == '__main__':
    run_tests()
......@@ -24,7 +24,8 @@ gpcrondump -x database_name
[--no-owner | --use-set-session-authorization]
[--no-privileges] [--rsyncable]
{ [--ddboost [--replicate --max-streams <max_IO_streams>
[--ddboost-skip-ping] ] ] } |
[--ddboost-skip-ping]
[--ddboost-storage-unit=<storage_unit_name>] ] ] } |
{ [--netbackup-service-host <netbackup_server>
--netbackup-policy <netbackup_policy>
--netbackup-schedule <netbackup_schedule>
......@@ -240,12 +241,25 @@ OPTIONS
--ddboost [--replicate --max-streams <max_IO_streams>
[--ddboost-skip-ping] ]
[--ddboost-skip-ping] [--ddboost-storage-unit=<storage_unit_name>]]
Use Data Domain Boost for this backup. Before using Data Domain Boost,
set up the Data Domain Boost credential, as described in the next option
below.
--ddboost-storage-unit is optional. It specifies the storage unit used
when replicating dump files between the source and target DDBoost
servers. If this option is not specified, the storage unit defaults to
the value saved in the cluster's configuration file when the Data Domain
Boost credentials were set.
Note: the storage unit will be created on the DDBoost server only if one
does not already exist and none of the following options are specified:
--incremental, --list-backup-file, --list-filter-tables,
-o, --ddboost-config-remove
The following option is recommended if --ddboost is specified.
* -z option (uncompressed)
......@@ -357,9 +371,10 @@ OPTIONS
Dump optimizer statistics from pg_statistic. Statistics are dumped in the
master data directory to db_dumps/YYYYMMDD/gp_statistics_1_1_<timestamp>.
If --ddboost is specified, the backup is located on the default storage
unit in the directory specified by --ddboost-backupdir when the Data
Domain Boost credentials were set.
If --ddboost is specified, the backup is located on the storage unit
specified by --ddboost-storage-unit, or by default, the storage unit
from the cluster's config file, under the default directory configured by
--ddboost-backupdir when the Data Domain Boost credentials were set.
-E <encoding>
......@@ -396,10 +411,10 @@ OPTIONS
files are dumped in the master or segment data directory to
db_dumps/YYYYMMDD/config_files_<timestamp>.tar.
If --ddboost is specified, the backup is located on the default storage
unit in the directory specified by --ddboost-backupdir when the Data
Domain Boost credentials were set.
If --ddboost is specified, the backup is located on the storage unit
specified by --ddboost-storage-unit, or by default, the storage unit
from the cluster's config file, under the default directory configured by
--ddboost-backupdir when the Data Domain Boost credentials were set.
-G (dump global objects)
......@@ -407,10 +422,10 @@ OPTIONS
Global objects are dumped in the master data directory to
db_dumps/YYYYMMDD/gp_global_1_1_<timestamp>.
If --ddboost is specified, the backup is located on the default storage
unit in the directory specified by --ddboost-backupdir when the Data
Domain Boost credentials were set.
If --ddboost is specified, the backup is located on the storage unit
specified by --ddboost-storage-unit, or by default, the storage unit
from the cluster's config file, under the default directory configured by
--ddboost-backupdir when the Data Domain Boost credentials were set.
-h (record dump details)
......
......@@ -18,7 +18,7 @@ gpdbrestore { -t <timestamp_key> { [-L]
[--truncate] [-e] [-G]
[-B <parallel_processes>]
[-d <master_data_directory>] [-a] [-q] [-l <logfile_directory>]
[-v] [--ddboost ]
[-v] [--ddboost [--ddboost-storage-unit=<storage_unit_name>] ]
[-S <schema_name> [-S ...]]
[--redirect <database_name> ]
[--change-schema=<schema_name> ]
......@@ -177,6 +177,11 @@ OPTIONS
<backup_directory> is set when you specify the Data Domain credentials
with gpcrondump.
--ddboost-storage-unit is optional. It specifies the storage unit from
which dump files are restored. If this option is not specified, the
storage unit defaults to the value saved in the cluster's configuration
file when the Data Domain Boost credentials were set.
This option is not supported if --netbackup-service-host is specified.
......
......@@ -12,14 +12,16 @@ SYNOPSIS
*****************************************************
gpmfr --delete {LATEST | OLDEST | <timestamp>} [--remote]
[--master-port=<master_port>] [--skip-ping] [-a] [-v | --verbose]
[--master-port=<master_port>] [--ddboost-storage-unit=<storage_unit_name>]
[--skip-ping] [-a] [-v | --verbose]
gpmfr {--replicate | --recover} {LATEST | OLDEST | <timestamp>}
--max-streams <max_IO_streams> [--master-port=<master_port>]
[--skip-ping] [-a] [-q | --quiet] [-v | --verbose]
--max-streams <max_IO_streams> [--ddboost-storage-unit=<storage_unit_name>]
[--master-port=<master_port>] [--skip-ping] [-a] [-q | --quiet] [-v | --verbose]
gpmfr {--list | --list-files {LATEST | OLDEST | <timestamp>} }
[--remote] [--master-port=<master_port>] [--skip-ping]
[--ddboost-storage-unit=<storage_unit_name>] [--remote]
[--master-port=<master_port>] [--skip-ping]
[-v | --verbose]
gpmfr --show-streams [--skip-ping] [-v | --verbose]
......@@ -198,6 +200,12 @@ OPTIONS
system before it can be replicated to the remote Data Domain system.
--ddboost-storage-unit=<storage_unit_name>
The storage unit on the DDBoost Data Domain server. If this option is
not specified, it defaults to the storage unit in the cluster's config file.
--remote
Perform the operation on the remote Data Domain system that is used for
......
......@@ -200,9 +200,10 @@ gp_backup_launch__(PG_FUNCTION_ARGS)
#ifdef USE_DDBOOST
char *pszDDBoostFileName = NULL;
char *pszDDBoostDirName = "db_dumps"; /* Default directory */
char *pszDDBoostStorageUnitName = NULL;
char *dd_boost_buffer_size = NULL;
char *gpDDBoostCmdLine = NULL;
char *temp = NULL, *pch = NULL, *pchs = NULL;
char *temp = NULL, *pch = NULL, *pchs = NULL, *pStu = NULL, *pStus = NULL;
#endif
verifyGpIdentityIsSet();
......@@ -426,8 +427,20 @@ gp_backup_launch__(PG_FUNCTION_ARGS)
else
pszDDBoostDirName = strdup("db_dumps/");
}
free(temp);
pStu = strstr(temp, "--ddboost-storage-unit");
int pStu_len = 0;
if (pStu)
{
pStu = pStu + strlen("--ddboost-storage-unit");
pStus = strtok(pStu, " ");
if (pStus)
{
pszDDBoostStorageUnitName = strdup(pStus);
pStu_len = strlen(pszDDBoostStorageUnitName);
}
}
free(temp);
/* Create the gpddboost parameter string */
len = strlen(pszDDBoostDirName)
......@@ -436,9 +449,11 @@ gp_backup_launch__(PG_FUNCTION_ARGS)
+ strlen("_post_data")
+ strlen(".gz")
+ strlen(" --to-file= ")
+ strlen(" --ddboost-storage-unit= ")
+ strlen(" --write-file-from-stdin ")
+ strlen(" --dd_boost_buf_size= ")
+ strlen(gpDDBoostPg)
+ pStu_len
+ 20;
gpDDBoostCmdLine = (char *) palloc(len);
......@@ -446,9 +461,19 @@ gp_backup_launch__(PG_FUNCTION_ARGS)
if (pszDDBoostFileName == NULL)
elog(ERROR, "\nDDboost filename is NULL\n");
sprintf(gpDDBoostCmdLine, "%s --write-file-from-stdin --to-file=%s/%s --dd_boost_buf_size=%s",
gpDDBoostPg, pszDDBoostDirName, pszDDBoostFileName, dd_boost_buffer_size);
sprintf(gpDDBoostCmdLine,
"%s --write-file-from-stdin --to-file=%s/%s --dd_boost_buf_size=%s ",
gpDDBoostPg,
pszDDBoostDirName,
pszDDBoostFileName,
dd_boost_buffer_size);
if (pszDDBoostStorageUnitName)
{
sprintf(gpDDBoostCmdLine + strlen(gpDDBoostCmdLine),
"--ddboost-storage-unit=%s ",
pszDDBoostStorageUnitName);
}
}
......@@ -781,9 +806,17 @@ gp_backup_launch__(PG_FUNCTION_ARGS)
pszDDBoostFileName = formDDBoostFileName(pszBackupKey, true, is_compress);
memset(gpDDBoostCmdLine, 0, strlen(gpDDBoostCmdLine));
sprintf(gpDDBoostCmdLine, "%s --write-file-from-stdin --to-file=%s/%s --dd_boost_buf_size=%s",
gpDDBoostPg, pszDDBoostDirName, pszDDBoostFileName, dd_boost_buffer_size);
sprintf(gpDDBoostCmdLine,
"%s --write-file-from-stdin --to-file=%s/%s "
"--dd_boost_buf_size=%s ",
gpDDBoostPg,
pszDDBoostDirName,
pszDDBoostFileName,
dd_boost_buffer_size);
if (pszDDBoostStorageUnitName)
sprintf(gpDDBoostCmdLine + strlen(gpDDBoostCmdLine), "--ddboost-storage-unit=%s", pszDDBoostStorageUnitName);
/* if user selected a compression program */
if (pszCompressionProgram[0] != '\0')
......
......@@ -134,6 +134,7 @@ PQExpBuffer dump_prefix_buf = NULL;
#ifdef USE_DDBOOST
#include "ddp_api.h"
static int dd_boost_enabled = 0;
static char *ddboost_storage_unit = NULL;
#endif
int
......@@ -177,7 +178,7 @@ main(int argc, char **argv)
goto cleanup;
mpp_msg(logInfo, progname, "Reading Greenplum Database configuration info from master database.\n");
if (!GetDumpSegmentDatabaseArray(master_db_conn, remote_version, &segDBAr, inputOpts.actors,
if (!GetDumpSegmentDatabaseArray(master_db_conn, remote_version, &segDBAr, inputOpts.actors,
inputOpts.pszRawDumpSet, inputOpts.pszDBName, inputOpts.pszUserName, dataOnly, schemaOnly))
goto cleanup;
......@@ -828,6 +829,7 @@ fillInputOptions(int argc, char **argv, InputOptions * pInputOpts)
#ifdef USE_DDBOOST
{"ddboost", no_argument, NULL, 6},
{"ddboost-storage-unit", required_argument, NULL, 21},
#endif
{"table-file", required_argument, NULL, 7},
{"exclude-table-file", required_argument, NULL, 8},
......@@ -1157,6 +1159,10 @@ fillInputOptions(int argc, char **argv, InputOptions * pInputOpts)
case 6:
dd_boost_enabled = 1;
break;
case 21:
ddboost_storage_unit = pg_strdup(optarg);
pInputOpts->pszPassThroughParms = addPassThroughLongParm("ddboost-storage-unit", ddboost_storage_unit, pInputOpts->pszPassThroughParms);
break;
#endif
case 7:
/* table-file option */
......@@ -1210,7 +1216,7 @@ fillInputOptions(int argc, char **argv, InputOptions * pInputOpts)
goto cleanup;
}
break;
case 11:
no_expand_children = true;
break;
......@@ -1293,12 +1299,12 @@ fillInputOptions(int argc, char **argv, InputOptions * pInputOpts)
{
pInputOpts->pszPassThroughParms = addPassThroughLongParm("dd_boost_enabled", NULL, pInputOpts->pszPassThroughParms);
/* If no directory is specified, for example when we gp_dump, then dump to default directory db_dumps */
/* If no directory is specified, for example when we gp_dump, then dump to default directory db_dumps */
if (pInputOpts->pszBackupDirectory)
ddboost_directory = pg_strdup(pInputOpts->pszBackupDirectory);
else
ddboost_directory = pg_strdup("db_dumps/");
pInputOpts->pszPassThroughParms = addPassThroughLongParm("dd_boost_dir", ddboost_directory, pInputOpts->pszPassThroughParms);
}
#endif
......@@ -1391,7 +1397,7 @@ fillInputOptions(int argc, char **argv, InputOptions * pInputOpts)
if (pInputOpts->pszPassThroughParms != NULL)
mpp_msg(logInfo, progname, "Read params: %s\n", pInputOpts->pszPassThroughParms);
else
mpp_msg(logInfo, progname, "Read params: <empty>\n");
mpp_msg(logInfo, progname, "Read params: <empty>\n");
cleanup:
......@@ -1487,6 +1493,7 @@ help(const char *progname)
printf((" or (i)ndividual segdb (must be followed with a list of dbids\n"));
printf((" of primary segments to dump. For example: --gp-s=i[10,12,14]\n"));
printf((" --rsyncable pass --rsyncable option to gzip"));
printf((" --ddboost-storage-unit pass the storage unit name"));
printf(("\nIf no database name is supplied, then the PGDATABASE environment\n"
"variable value is used.\n\n"));
......@@ -2150,7 +2157,7 @@ threadProc(void *arg)
* another thread failing. A BackupStateMachine object is used to manage
* receiving these notifications
*/
time(&now);
time(&last);
......@@ -2173,8 +2180,8 @@ threadProc(void *arg)
bSentCancelMessage = true;
goto cleanup;
}
/* Replacing select() by poll() here to overcome the limitations of
/* Replacing select() by poll() here to overcome the limitations of
select() to handle large socket file descriptor values.
*/
......
......@@ -279,7 +279,7 @@ char *formPostDumpFilePathName(char *pszBackupDirectory, char *pszBackupKey, int
#include "ddp_api.h"
static int dd_boost_enabled = 0; /* Is set to 1 if we are doing a backup onto Data Domain system */
static int dd_boost_buf_size = 0;
static char *ddboost_storage_unit = NULL;
static void dumpDatabaseDefinitionToDDBoost(void);
#ifndef MAX_PATH_NAME
......@@ -300,12 +300,11 @@ static ddp_inst_desc_t ddp_inst = DDP_INVALID_DESCRIPTOR;
static ddp_conn_desc_t ddp_conn = DDP_INVALID_DESCRIPTOR;
static ddp_path_t path1 = {0};
static char *DDP_SU_NAME = NULL;
static char *DEFAULT_BACKUP_DIRECTORY = NULL;
char *log_message_path = NULL;
static int createDDBoostDir(ddp_conn_desc_t ddp_conn, char *storage_unit_name, char *path_name);
static int createDDBoostDir(ddp_conn_desc_t ddp_conn, char *ddboost_storage_unit, char *path_name);
static int updateArchiveWithDDFile(ArchiveHandle *AH, char *g_pszDDBoostFile, const char *g_pszDDBoostDir);
#endif
......@@ -513,6 +512,7 @@ main(int argc, char **argv)
{"dd_boost_enabled", no_argument, NULL, 7},
{"dd_boost_dir", required_argument, NULL, 8},
{"dd_boost_buf_size", required_argument, NULL, 9},
{"ddboost-storage-unit", required_argument, NULL, 18},
#endif
{"incremental-filter", required_argument, NULL, 10},
{"netbackup-service-host", required_argument, NULL, 11},
......@@ -762,6 +762,9 @@ main(int argc, char **argv)
case 9:
sscanf(optarg, "%d", &dd_boost_buf_size);
break;
case 18:
ddboost_storage_unit = pg_strdup(optarg);
break;
#endif
case 10:
incrementalFilter = pg_strdup(optarg);
......@@ -886,7 +889,8 @@ main(int argc, char **argv)
exit(1);
}
ret = initDDSystem(&ddp_inst, &ddp_conn, &dd_client_info, &DDP_SU_NAME, false, &DEFAULT_BACKUP_DIRECTORY, false);
ret = initDDSystem(&ddp_inst, &ddp_conn, &dd_client_info, &ddboost_storage_unit, false, &DEFAULT_BACKUP_DIRECTORY, false);
if (ret)
{
mpp_err_msg(logError, progname, "Error connecting to DDboost. Check parameters\n");
......@@ -6229,7 +6233,7 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo)
appendPQExpBuffer(q, "\n");
/*
* MPP-25549: Dump ALTER statements for subpartition tables being
* MPP-25549: Dump ALTER statements for subpartition tables being
* set to different schema other than the parent
*/
if (g_gp_supportsPartitioning)
......@@ -7495,17 +7499,17 @@ monitorThreadProc(void *arg __attribute__((unused)))
while (!bGotFinished)
{
/* Replacing select() by poll() here to overcome the limitations of
/* Replacing select() by poll() here to overcome the limitations of
select() to handle large socket file descriptor values.
*/
pollInput->fd = sock;
pollInput->events = POLLIN;
pollInput->revents = 0;
pollInput->revents = 0;
pollTimeout = 2000;
pollResult = poll(pollInput, 1, pollTimeout);
if(pollResult < 0)
if(pollResult < 0)
{
mpp_err_msg(logError, progname, "poll failed for backup key %s, instid %d, segid %d failed\n",
g_CDBDumpKey, g_role, g_dbID);
......@@ -7943,7 +7947,7 @@ dumpDatabaseDefinitionToDDBoost()
* Make sure we can create this file before we spin off sh cause we don't
* get a good error message from sh if we can't write to the file
*/
path1.su_name = DDP_SU_NAME;
path1.su_name = ddboost_storage_unit;
path1.path_name = g_pszDDBoostDir;
err = createDDBoostDir(ddp_conn, path1.su_name, path1.path_name);
......@@ -8182,7 +8186,7 @@ updateArchiveWithDDFile(ArchiveHandle *AH, char *g_pszDDBoostFile, const char *g
int err = 0;
char *dir_name = "db_dumps";
path1.su_name = DDP_SU_NAME;
path1.su_name = ddboost_storage_unit;
if (g_pszDDBoostDir)
path1.path_name = pg_strdup(g_pszDDBoostDir);
......@@ -8198,7 +8202,7 @@ updateArchiveWithDDFile(ArchiveHandle *AH, char *g_pszDDBoostFile, const char *g
}
int
createDDBoostDir(ddp_conn_desc_t ddp_conn, char *storage_unit_name, char *path_name)
createDDBoostDir(ddp_conn_desc_t ddp_conn, char *ddboost_storage_unit, char *path_name)
{
char *pch = NULL;
ddp_path_t path = {0};
......@@ -8211,7 +8215,7 @@ createDDBoostDir(ddp_conn_desc_t ddp_conn, char *storage_unit_name, char *path_n
pch = strtok(path_name, " /");
while(pch != NULL)
{
path.su_name = storage_unit_name;
path.su_name = ddboost_storage_unit;
strcat(full_path, "/");
strcat(full_path, pch);
path.path_name = full_path;
......
......@@ -24,12 +24,13 @@
#include "cdb_dump_util.h"
#define DDP_CL_DDP 1
#define DEFAULT_STORAGE_UNIT "GPDB"
static char predump_errmsg[1024];
bool shouldDumpSchemaOnly(int g_role, bool incrementalBackup, void *list) {
if (g_role != ROLE_SEGDB || !incrementalBackup)
return false;
return false;
if (list)
return false;
......@@ -104,7 +105,7 @@ FreeInputOptions(InputOptions * pInputOpts)
/* hard coded as gzip for now, no need to free
if ( pInputOpts->pszCompressionProgram != NULL )
free( pInputOpts->pszCompressionProgram );
free( pInputOpts->pszCompressionProgram );
*/
if (pInputOpts->pszPassThroughParms != NULL)
......@@ -175,8 +176,8 @@ shouldExpandChildren(bool g_gp_supportsPartitioning, bool no_expand_children)
}
/*
* isFilteringAllowed: This method checks if we should filter out tables based on
* whether we are using incremental mode for backup and if
* isFilteringAllowed: This method checks if we should filter out tables based on
* whether we are using incremental mode for backup and if
* we are on the master
* Arguments:
* role - The role of the segment E.g ROLE_MASTER, NON_MASTER etc
......@@ -187,7 +188,7 @@ shouldExpandChildren(bool g_gp_supportsPartitioning, bool no_expand_children)
*
*/
bool
isFilteringAllowedNow(int role, bool incrementalBackup, char *incrementalFilter)
isFilteringAllowedNow(int role, bool incrementalBackup, char *incrementalFilter)
{
if (!incrementalBackup)
return true;
......@@ -822,7 +823,7 @@ GetTimestampKey(char* timestamp_key)
if (!timestamp_key){
mpp_err_msg("INFO", "GetTimestampKey", "Timestamp key is generated as it is not provided by the user.\n");
return GenerateTimestampKey();
}
}
/* User has provided a valid timestamp, we simply use that */
return strdup(timestamp_key);
......@@ -961,7 +962,7 @@ parseDbidSet(int *dbidset, char *dump_set)
return count;
}
const char*
const char*
getBackupTypeString(bool incremental)
{
if (incremental)
......@@ -974,7 +975,7 @@ getBackupTypeString(bool incremental)
}
}
char*
char*
formCompressionProgramString(char* compPg)
{
char extra[] = " -c ";
......@@ -986,9 +987,9 @@ formCompressionProgramString(char* compPg)
return retVal;
}
void
void
formPostDataSchemaOnlyPsqlCommandLine(char** retVal, const char* inputFileSpec, bool compUsed, const char* compProg,
const char* post_data_filter_script, const char* table_filter_file,
const char* post_data_filter_script, const char* table_filter_file,
const char* psqlPg, const char* catPg,
const char* gpNBURestorePg, const char* netbackupServiceHost, const char* netbackupBlockSize,
const char* change_schema_file, const char *schema_level_file)
......@@ -1024,9 +1025,9 @@ formPostDataSchemaOnlyPsqlCommandLine(char** retVal, const char* inputFileSpec,
strcat(pszCmdLine, " | ");
strcat(pszCmdLine, psqlPg);
}
else
{
}
else
{
if (netbackupServiceHost)
{
strncpy(pszCmdLine, gpNBURestorePg, (1 + strlen(gpNBURestorePg)));
......@@ -1056,14 +1057,14 @@ formPostDataSchemaOnlyPsqlCommandLine(char** retVal, const char* inputFileSpec,
strcat(pszCmdLine, " | ");
strcat(pszCmdLine, psqlPg);
}
}
}
}
/* Build command line for gp_restore_agent */
void
void
formSegmentPsqlCommandLine(char** retVal, const char* inputFileSpec, bool compUsed, const char* compProg,
const char* filter_script, const char* table_filter_file,
const char* filter_script, const char* table_filter_file,
int role, const char* psqlPg, const char* catPg,
const char* gpNBURestorePg, const char* netbackupServiceHost, const char* netbackupBlockSize,
const char* change_schema_file, const char *schema_level_file)
......@@ -1125,8 +1126,8 @@ formSegmentPsqlCommandLine(char** retVal, const char* inputFileSpec, bool compUs
}
/* Build command line with gprestore_filter.py and its passed through parameters */
void
formFilterCommandLine(char** retVal, const char* filter_script, const char* table_filter_file,
void
formFilterCommandLine(char** retVal, const char* filter_script, const char* table_filter_file,
int role, const char* change_schema_file, const char *schema_level_file)
{
char* pszCmdLine = *retVal;
......@@ -1162,8 +1163,8 @@ formFilterCommandLine(char** retVal, const char* filter_script, const char* tabl
}
/* Build command line with gprestore_post_data_filter.py and its passed through parameters */
void
formPostDataFilterCommandLine(char** retVal, const char* post_data_filter_script, const char* table_filter_file,
void
formPostDataFilterCommandLine(char** retVal, const char* post_data_filter_script, const char* table_filter_file,
const char* change_schema_file, const char *schema_level_file)
{
char* pszCmdLine = *retVal;
......@@ -1226,7 +1227,7 @@ static int setLBEnv(void);
static int createLB(clbHandle* LB,char* name);
static int openLB(clbHandle* LB,char* name);
static int validateDDBoostCredential(char *hostname, char *user, char *password, char* log_level ,char* log_size, char *default_backup_directory, bool remote);
int getDDBoostCredential(char** hostname, char** user, char** password, char** log_level ,char** log_size, char **default_backup_directory, bool remote);
int getDDBoostCredential(char** hostname, char** user, char** password, char** log_level ,char** log_size, char **default_backup_directory, char **ddboost_storage_unit, bool remote);
/*
* Set the environment variable LD_LIBRARY_PATH in order to dynamically load LB's libraries.
......@@ -1315,6 +1316,12 @@ setItem(clbHandle* LB, char *key, char *value)
return 0;
}
/*
 * setItemWithDefault
 *
 * Store key/value in the lockbox, substituting defaultValue when the
 * caller passed a NULL value.  Returns setItem()'s status (0 on success,
 * non-zero on failure).
 */
static int
setItemWithDefault(clbHandle *LB, char *key, char *value, char *defaultValue)
{
	/* "value ?: defaultValue" is a GCC extension; spell out the
	 * equivalent standard-C conditional for portability. */
	return setItem(LB, key, value ? value : defaultValue);
}
static int
getItem(clbHandle* LB, char *key, char **value)
{
......@@ -1365,11 +1372,11 @@ createLB(clbHandle* LB,char* name)
clb_pass[i] = _base64[rand() % _base64_len];
}
clb_pass[34] = '\0';
/*
/*
* for creating the lockbox we should call to clb_create.
* this function needs a password with at least 8 characters, with several constraints.
* the password is set to optional few lines later, but we must initialize it during the LB creation.
* the password is set to optional few lines later, but we must initialize it during the LB creation.
* of course we don't want to use fixed password, so we're using a random password
*/
mpp_err_msg("INFO", "ddboost", "creating LB on %s\n", filepath);
......@@ -1398,8 +1405,8 @@ static int
openLB(clbHandle* LB,char* name)
{
int iError;
char filepath[PATH_MAX];
char *home = getenv("HOME");
char filepath[PATH_MAX];
char *home = getenv("HOME");
char* eMsg = NULL;
if (NULL == home)
......@@ -1407,10 +1414,10 @@ openLB(clbHandle* LB,char* name)
mpp_err_msg("ERROR", "ddboost", "HOME undefined, can't set ddboost credentials\n");
return -1;
}
memset(filepath, 0, PATH_MAX);
snprintf(filepath, strlen(home) + strlen(name) + 2, "%s/%s", home, name);
if (setLBEnv() < 0)
{
return -1;
......@@ -1432,9 +1439,11 @@ openLB(clbHandle* LB,char* name)
* Returns 0 in case of success, and -1 otherwise.
*/
int
setDDBoostCredential(char *hostname, char *user, char *password, char* log_level ,char* log_size, char *default_backup_directory, bool remote)
setDDBoostCredential(char *hostname, char *user, char *password, char* log_level ,char* log_size, char *default_backup_directory, char *ddboost_storage_unit, bool remote)
{
/* TODO: validate default backup directory name if needed */
/* TODO: validate default backup directory name if needed
TODO: validate storage unit
*/
if (validateDDBoostCredential(hostname, user, password, log_level , log_size, default_backup_directory, remote))
return -1;
......@@ -1455,44 +1464,27 @@ setDDBoostCredential(char *hostname, char *user, char *password, char* log_level
return -1;
if (setItem(&LB , "password",password))
return -1;
if (!remote)
{
if (setItem(&LB , "default_backup_directory",default_backup_directory))
return -1;
}
if (log_level)
{
if (setItem(&LB , "log_level",log_level))
return -1;
}
else
int ret_code = 0;
if (!remote)
{
if (setItem(&LB , "log_level","WARNING"))
return -1;
ret_code |= setItem(&LB , "default_backup_directory",default_backup_directory);
ret_code |= setItemWithDefault(&LB, "ddboost_storage_unit", ddboost_storage_unit, DEFAULT_STORAGE_UNIT);
}
if (log_size)
{
if (setItem(&LB , "log_size",log_size))
return -1;
}
else
{
if (setItem(&LB , "log_size","50"))
return -1;
}
ret_code |= setItemWithDefault(&LB, "log_level", log_level, "WARNING");
ret_code |= setItemWithDefault(&LB, "log_size", log_size, "50");
clb_close(LB);
return 0;
return ret_code;
}
int
getDDBoostCredential(char** hostname, char** user, char** password, char **log_level ,char** log_size, char **default_backup_directory, bool remote)
getDDBoostCredential(char** hostname, char** user, char** password, char **log_level ,char** log_size, char **default_backup_directory, char **ddboost_storage_unit, bool remote)
{
clbHandle LB;
if (remote)
{
if (openLB(&LB,"DDBOOST_MFR_CONFIG"))
......@@ -1511,13 +1503,17 @@ getDDBoostCredential(char** hostname, char** user, char** password, char **log_l
return -1;
if (!remote)
{
if (getItem(&LB , "default_backup_directory",default_backup_directory))
if (getItem(&LB , "default_backup_directory", default_backup_directory))
return -1;
if (getItem(&LB , "ddboost_storage_unit", ddboost_storage_unit))
return -1;
}
if (getItem(&LB , "log_level",log_level))
return -1;
if (getItem(&LB , "log_size",log_size))
return -1;
clb_close(LB);
return 0;
}
......@@ -1583,16 +1579,16 @@ validateDDBoostCredential(char *hostname, char *user, char *password, char* log_
int
parseDDBoostCredential(char *hostname, char *user, char *password, const char *progName)
{
char filepath[PATH_MAX];
char filepath[PATH_MAX];
char *home = getenv("HOME");
char line[PATH_MAX];
if (NULL == home)
{
mpp_err_msg("ERROR", progName, "HOME undefined, can't set ddboost credentials\n");
return -1;
}
memset(filepath, 0, PATH_MAX);
snprintf(filepath, strlen(home) + strlen(DDBOOST_CONFIG_FILE) + 2, "%s/%s", home, DDBOOST_CONFIG_FILE);
......@@ -1687,13 +1683,13 @@ parseDDBoostCredential(char *hostname, char *user, char *password, const char *p
void rotate_dd_logs(const char *file_name, unsigned int num_of_files, unsigned int log_size)
{
struct stat st;
if (stat(file_name,&st) == 0)
{
unsigned int size = (unsigned int)st.st_size;
if (size > log_size)
{
char tmp_name[80];
char next_tmp_name[80];
sprintf(tmp_name,"%s_%u",file_name,num_of_files);
......@@ -1701,24 +1697,24 @@ void rotate_dd_logs(const char *file_name, unsigned int num_of_files, unsigned i
if (r != 0)
mpp_err_msg("INFO","rotate_dd_logs","didn't delete of %s , %s\n" ,tmp_name, strerror( errno ));
for (unsigned int i = num_of_files - 1; i > 0; i--){
for (unsigned int i = num_of_files - 1; i > 0; i--){
snprintf(next_tmp_name, 80, "%s_%u",file_name,i + 1);
snprintf(tmp_name,80 ,"%s_%u", file_name,i);
if (rename(tmp_name, next_tmp_name) != 0)
mpp_err_msg("INFO","rotate_dd_logs","didn't rename of %s to %s : %s\n" ,tmp_name,next_tmp_name,strerror( errno ));
}
snprintf(next_tmp_name, 80, "%s_%u",file_name,1);
if ((r = rename(file_name, next_tmp_name)) != 0)
mpp_err_msg("INFO","rotate_dd_logs","didn't rename first log %s to %s : %s" ,tmp_name,next_tmp_name,strerror( errno ));
}
}
else
mpp_err_msg("INFO","rotate_dd_logs","failed to find size");
}
/* Initialize the file for logging DDboost related information */
void
void
_ddp_test_log(const void *session_ptr, const ddp_char_t *log_msg, ddp_severity_t severity)
{
......@@ -1729,12 +1725,12 @@ _ddp_test_log(const void *session_ptr, const ddp_char_t *log_msg, ddp_severity_t
time_t ltime;
struct tm *Tm;
char file_name[] = "libDDBoost.log";
rotate_dd_logs(file_name, DDBOOST_LOG_NUM_OF_FILES, ddboost_logs_info.logsSize / DDBOOST_LOG_NUM_OF_FILES);
log_file = fopen(file_name, "a");
if (log_file) {
ltime = time(NULL);
Tm = localtime(&ltime);
......@@ -1746,18 +1742,18 @@ _ddp_test_log(const void *session_ptr, const ddp_char_t *log_msg, ddp_severity_t
}
}
int
initDDSystem(ddp_inst_desc_t *ddp_inst, ddp_conn_desc_t *ddp_conn, ddp_client_info_t *cl_info, char **ddp_su_name,
int
initDDSystem(ddp_inst_desc_t *ddp_inst, ddp_conn_desc_t *ddp_conn, ddp_client_info_t *cl_info, char **ddboost_storage_unit,
bool createStorageUnit, char **default_backup_directory, bool remote)
{
int err = DD_ERR_NONE;
unsigned int POOL_SIZE = DDBOOST_POOL_SIZE;
char *storage_unit_name = NULL;
char *dd_boost_username = NULL;
char *dd_boost_passwd = NULL;
char *dd_boost_hostname = NULL;
char *log_level = NULL;
char *log_size = NULL;
char *storage_unit_configured = NULL;
err = getDDBoostCredential(&dd_boost_hostname,
&dd_boost_username,
......@@ -1765,23 +1761,21 @@ initDDSystem(ddp_inst_desc_t *ddp_inst, ddp_conn_desc_t *ddp_conn, ddp_client_in
&log_level,
&log_size,
default_backup_directory,
&storage_unit_configured,
remote);
if (*ddboost_storage_unit == NULL)
*ddboost_storage_unit = Safe_strdup(storage_unit_configured);
free(storage_unit_configured);
if (err)
{
mpp_err_msg("ERROR", "ddboost", "Parsing DDBoost login credentials failed\n");
return -1;
}
storage_unit_name = (char*)malloc(PATH_MAX);
if (storage_unit_name == NULL)
{
mpp_err_msg("ERROR", "ddboost", "Memory allocation failed during DDBoost initialization\n");
return -1;
}
sprintf(storage_unit_name, "%s", "GPDB");
*ddp_su_name = storage_unit_name;
if (*ddp_inst == DDP_INVALID_DESCRIPTOR)
{
err = ddp_instance_create(POOL_SIZE, cl_info, ddp_inst);
......@@ -1794,8 +1788,8 @@ initDDSystem(ddp_inst_desc_t *ddp_inst, ddp_conn_desc_t *ddp_conn, ddp_client_in
ddp_log_init(*ddp_inst, NULL, _ddp_test_log);
}
err = ddp_connect_with_user_pwd(*ddp_inst, dd_boost_hostname, NULL, dd_boost_username, dd_boost_passwd, ddp_conn);
err = ddp_connect_with_user_pwd(*ddp_inst, dd_boost_hostname, NULL, dd_boost_username, dd_boost_passwd, ddp_conn);
if (err != DD_ERR_NONE) {
mpp_err_msg("ERROR", "ddboost", "ddboost connect failed. Err = %d, remote = %d\n", err, remote);
return err;
......@@ -1803,7 +1797,7 @@ initDDSystem(ddp_inst_desc_t *ddp_inst, ddp_conn_desc_t *ddp_conn, ddp_client_in
if (createStorageUnit)
{
err = ddp_create_storage_unit(*ddp_conn, storage_unit_name);
err = ddp_create_storage_unit(*ddp_conn, *ddboost_storage_unit);
if (err != DD_ERR_NONE) {
mpp_err_msg("ERROR", "ddboost", "ddboost create storage unit failed. Err = %d\n", err);
return err;
......@@ -1820,20 +1814,19 @@ initDDSystem(ddp_inst_desc_t *ddp_inst, ddp_conn_desc_t *ddp_conn, ddp_client_in
ddboost_logs_info.logLevel = DDP_SEV_ERROR;
else if (!strncmp("NONE",log_level,4))
ddboost_logs_info.logLevel = DDP_SEV_NONE;
ddboost_logs_info.logsSize = atoi(log_size)*1024*1024;
ddboost_logs_info.logsSize = atoi(log_size)*1024*1024;
return 0;
}
void
formDDBoostPsqlCommandLine(char** retVal, bool compUsed, const char* ddboostPg, const char* compProg,
formDDBoostPsqlCommandLine(char** retVal, bool compUsed, const char* ddboostPg, const char* compProg,
const char* ddp_file_name, const char* dd_boost_buf_size,
const char* filter_script, const char* table_filter_file,
const char* filter_script, const char* table_filter_file,
int role, const char* psqlPg, bool postSchemaOnly,
const char* change_schema_file, const char *schema_level_file)
const char* change_schema_file, const char *schema_level_file,
const char* ddboost_storage_unit)
{
char* pszCmdLine = *retVal;
......@@ -1841,12 +1834,17 @@ formDDBoostPsqlCommandLine(char** retVal, bool compUsed, const char* ddboostPg,
strcat(pszCmdLine, " --readFile");
strcat(pszCmdLine, " --from-file=");
strcat(pszCmdLine, ddp_file_name);
if(compUsed)
{
strcat(pszCmdLine, ".gz");
}
if (ddboost_storage_unit)
{
strcat(pszCmdLine, " --ddboost-storage-unit=");
strcat(pszCmdLine, ddboost_storage_unit);
}
strcat(pszCmdLine, " --dd_boost_buf_size=");
strcat(pszCmdLine, dd_boost_buf_size);
......@@ -1858,7 +1856,7 @@ formDDBoostPsqlCommandLine(char** retVal, bool compUsed, const char* ddboostPg,
if (postSchemaOnly)
formPostDataFilterCommandLine(&pszCmdLine, filter_script, table_filter_file, change_schema_file, schema_level_file);
else
else
formFilterCommandLine(&pszCmdLine, filter_script, table_filter_file, role, change_schema_file, schema_level_file);
strcat(pszCmdLine, " | ");
......@@ -1888,7 +1886,7 @@ const char EMPTY_TYPSTORAGE = '\0';
int
initializeHashTable(int num_elems)
{
{
HASH_TABLE_SIZE = num_elems;
if(!(hash_table = (Node **)calloc(HASH_TABLE_SIZE, sizeof(Node*))))
......@@ -1908,7 +1906,7 @@ insertIntoHashTable(Oid o, char t)
Node *new_node = (Node *)malloc(sizeof(Node));
if (!new_node)
if (!new_node)
return -1;
new_node->oid = o;
......@@ -1953,8 +1951,8 @@ char getTypstorage(Oid o)
return temp->typstorage;
temp = temp->next;
}
return EMPTY_TYPSTORAGE;
return EMPTY_TYPSTORAGE;
}
int removeNode(Oid o)
......
......@@ -28,13 +28,13 @@
#define DDBOOST_POOL_SIZE (32 * 1024 * 2048)
#endif
extern int getDDBoostCredential(char** hostname, char** user, char** password, char** log_level ,char** log_size, char **default_backup_directory, bool remote);
extern int setDDBoostCredential(char *hostname, char *user, char *password, char *log_level ,char *log_size, char *default_backup_directory, bool remote);
extern int getDDBoostCredential(char** hostname, char** user, char** password, char** log_level ,char** log_size, char **default_backup_directory, char **ddboost_storage_unit, bool remote);
extern int setDDBoostCredential(char *hostname, char *user, char *password, char *log_level ,char *log_size, char *default_backup_directory, char *ddboost_storage_unit, bool remote);
extern int parseDDBoostCredential(char *hostname, char *user, char *password, const char *progName);
extern void rotate_dd_logs(const char *file_name, unsigned int num_of_files, unsigned int log_size);
extern void _ddp_test_log(const void *session_ptr, const ddp_char_t *log_msg, ddp_severity_t severity);
extern int initDDSystem(ddp_inst_desc_t *ddp_inst, ddp_conn_desc_t *ddp_conn, ddp_client_info_t *cl_info,
char **dd_su_name, bool createStorageUnit, char **default_backup_directory, bool remote);
char **ddboost_storage_unit, bool createStorageUnit, char **default_backup_directory, bool remote);
#endif
/* --------------------------------------------------------------------------------------------------
......@@ -81,8 +81,8 @@ extern void DoCancelNotifyListen(PGconn *pConn, bool bListen,
const char *pszSuffix);
/* Checks if we should filter out tables when using incremental mode */
extern bool isFilteringAllowedNow(int role, bool incrementalBackup, char *incrementalFilter);
extern bool isFilteringAllowedNow(int role, bool incrementalBackup, char *incrementalFilter);
/* frees data allocated inside an InputOptions struct */
extern void FreeInputOptions(InputOptions * pInputOpts);
......@@ -159,19 +159,20 @@ extern char *Base64ToData(char *pszIn, unsigned int *pOutLen);
extern char *nextToken(register char **stringp, register const char *delim);
extern int parseDbidSet(int *dbidset, char *dump_set);
extern char* formCompressionProgramString(char* compPg);
extern void formDDBoostPsqlCommandLine(char** retVal, bool compUsed, const char* ddboostPg, const char* compProg,
extern void formDDBoostPsqlCommandLine(char** retVal, bool compUsed, const char* ddboostPg, const char* compProg,
const char* ddp_file_name, const char* dd_boost_buf_size,
const char* filter_script, const char* table_filter_file,
const char* filter_script, const char* table_filter_file,
int role, const char* psqlPg, bool postSchemaOnly,
const char* change_schema_file, const char *schema_level_file);
const char* change_schema_file, const char *schema_level_file,
const char* ddboost_storage_unit);
extern void formSegmentPsqlCommandLine(char** retVal, const char* inputFileSpec,
bool compUsed, const char* compProg, const char* filter_script,
extern void formSegmentPsqlCommandLine(char** retVal, const char* inputFileSpec,
bool compUsed, const char* compProg, const char* filter_script,
const char* table_filter_file, int role, const char* psqlPg, const char* catPg,
const char* gpNBURestorePg, const char* netbackupServiceHost, const char* netbackupBlockSize,
const char* change_schema, const char* schema_level_file);
extern void formPostDataSchemaOnlyPsqlCommandLine(char** retVal, const char* inputFileSpec,
extern void formPostDataSchemaOnlyPsqlCommandLine(char** retVal, const char* inputFileSpec,
bool compUsed, const char* compProg, const char* post_data_filter_script,
const char* table_filter_file, const char* psqlPg, const char* catPg,
const char* gpNBURestorePg, const char* netbackupServiceHost, const char* netbackupBlockSize,
......
......@@ -300,6 +300,7 @@ usage(void)
printf((" --prefix=PREFIX PREFIX of the dump files to be restored\n"));
printf((" --change-schema-file=SCHEMA_FILE Schema file containing the name of the schema to which tables are to be restored\n"));
printf((" --schema-level-file=SCHEMA_FILE Schema file containing the name of the schemas under which all tables are to be restored\n"));
printf((" --ddboost-storage-unit pass the storage unit name"));
}
bool
......@@ -368,6 +369,7 @@ fillInputOptions(int argc, char **argv, InputOptions * pInputOpts)
#ifdef USE_DDBOOST
{"ddboost", no_argument, NULL, 10},
{"ddboost-storage-unit", required_argument, NULL, 19},
#endif
{"gp-f", required_argument, NULL, 11},
......@@ -567,7 +569,7 @@ fillInputOptions(int argc, char **argv, InputOptions * pInputOpts)
opts->triggerNames = strdup(optarg);
pInputOpts->pszPassThroughParms = addPassThroughParm( c, optarg, pInputOpts->pszPassThroughParms );
break;
*/
*/
case 's': /* dump schema only */
opts->schemaOnly = 1;
schemaOnly = true;
......@@ -730,6 +732,9 @@ fillInputOptions(int argc, char **argv, InputOptions * pInputOpts)
case 10:
dd_boost_enabled = 1;
break;
case 19:
pInputOpts->pszPassThroughParms = addPassThroughLongParm("ddboost-storage-unit", optarg, pInputOpts->pszPassThroughParms);
break;
#endif
case 11:
pInputOpts->pszPassThroughParms = addPassThroughLongParm("gp-f", optarg, pInputOpts->pszPassThroughParms);
......@@ -793,7 +798,7 @@ fillInputOptions(int argc, char **argv, InputOptions * pInputOpts)
dataRestore = false;
}
#ifdef USE_DDBOOST
if (dd_boost_enabled)
{
......@@ -1117,7 +1122,7 @@ threadProc(void *arg)
pszNotifyRelNameFail = MakeString("%s_%s", pszNotifyRelName, SUFFIX_FAIL);
pollInput = (struct pollfd *)malloc(sizeof(struct pollfd));
while (!bIsFinished)
{
/*
......@@ -1136,18 +1141,18 @@ threadProc(void *arg)
DoCancelNotifyListen(pConn, false, pszKey, sSegDB->role, sSegDB->dbid, tSegDB->dbid, NULL);
bSentCancelMessage = true;
}
/* Replacing select() by poll() here to overcome the limitations of
/* Replacing select() by poll() here to overcome the limitations of
select() to handle large socket file descriptor values.
*/
pollInput->fd = sock;
pollInput->events = POLLIN;
pollInput->revents = 0;
pollInput->revents = 0;
pollTimeout = 2000;
pollResult = poll(pollInput, 1, pollTimeout);
if(pollResult < 0)
if(pollResult < 0)
{
g_b_SendCancelMessage = true;
pParm->pszErrorMsg = MakeString("poll failed for backup key %s, source dbid %d, target dbid %d failed\n",
......
......@@ -80,13 +80,13 @@ extern ddp_client_info_t dd_client_info;
static ddp_inst_desc_t ddp_inst = DDP_INVALID_DESCRIPTOR;
static ddp_conn_desc_t ddp_conn = DDP_INVALID_DESCRIPTOR;
static char *DDP_SU_NAME = NULL;
static char *DEFAULT_BACKUP_DIRECTORY = NULL;
char *log_message_path = NULL;
static int dd_boost_enabled = 0;
static char *dd_boost_buf_size = NULL;
static char *ddboostPg = NULL;
static char *ddboost_storage_unit = NULL;
#endif
#ifndef PATH_NAME_MAX
......@@ -243,6 +243,7 @@ main(int argc, char **argv)
{"netbackup-block-size", required_argument, NULL, 16},
{"change-schema-file", required_argument, NULL, 17},
{"schema-level-file", required_argument, NULL, 18},
{"ddboost-storage-unit",required_argument, NULL, 19},
{NULL, 0, NULL, 0}
};
......@@ -458,6 +459,11 @@ main(int argc, char **argv)
case 18:
schema_level_file = strdup(optarg);
break;
#ifdef USE_DDBOOST
case 19:
ddboost_storage_unit = strdup(optarg);
break;
#endif
default:
fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
exit(1);
......@@ -532,14 +538,14 @@ main(int argc, char **argv)
exit(1);
}
if (initDDSystem(&ddp_inst, &ddp_conn, &dd_client_info, &DDP_SU_NAME, false, &DEFAULT_BACKUP_DIRECTORY, false))
if (initDDSystem(&ddp_inst, &ddp_conn, &dd_client_info, &ddboost_storage_unit, false, &DEFAULT_BACKUP_DIRECTORY, false))
{
mpp_err_msg(logInfo, progname, "Initializing DD system failed\n");
exit(1);
}
mpp_err_msg(logInfo, progname, "ddboost is initialized\n");
ddp_file_name = formDDBoostFileName(g_gpdumpKey, postDataSchemaOnly, dd_boost_dir);
if (ddp_file_name == NULL)
{
......@@ -804,8 +810,13 @@ main(int argc, char **argv)
{
formDDBoostPsqlCommandLine(&pszCmdLine, bCompUsed, ddboostPg, g_compPg,
ddp_file_name, dd_boost_buf_size,
postDataSchemaOnly? postDataFilterScript : filterScript, table_filter_file,
g_role, psqlPg, postDataSchemaOnly, change_schema_file, schema_level_file);
postDataSchemaOnly? postDataFilterScript : filterScript,
table_filter_file,
g_role, psqlPg,
postDataSchemaOnly,
change_schema_file,
schema_level_file,
ddboost_storage_unit);
}
else
{
......@@ -1014,6 +1025,7 @@ main(int argc, char **argv)
#ifdef USE_DDBOOST
if(dd_boost_enabled)
cleanupDDSystem();
free(ddboost_storage_unit);
#endif
makeSureMonitorThreadEnds(TASK_RC_SUCCESS, TASK_MSG_SUCCESS);
......@@ -1084,6 +1096,8 @@ usage(const char *progname)
" use SESSION AUTHORIZATION commands instead of\n"
" OWNER TO commands\n"));
printf(_(" --ddboost-storage-unit Storage unit to use on the ddboost server\n"));
printf(_("\nConnection options:\n"));
printf(_(" -h, --host=HOSTNAME database server host or socket directory\n"));
printf(_(" -p, --port=PORT database server port number\n"));
......@@ -1209,17 +1223,17 @@ monitorThreadProc(void *arg __attribute__((unused)))
/* Once we've seen the TASK_FINISH insert request, we know to leave */
while (!bGotFinished)
{
/* Replacing select() by poll() here to overcome the limitations of
/* Replacing select() by poll() here to overcome the limitations of
select() to handle large socket file descriptor values.
*/
pollInput->fd = sock;
pollInput->events = POLLIN;
pollInput->revents = 0;
pollInput->revents = 0;
pollTimeout = 2000;
pollResult = poll(pollInput, 1, pollTimeout);
if(pollResult < 0)
if(pollResult < 0)
{
mpp_err_msg(logError, progname, "poll failed for backup key %s, instid %d, segid %d failed\n",
g_gpdumpKey, g_role, g_sourceDBID);
......@@ -1572,14 +1586,14 @@ static char *formDDBoostFileName(char *pszBackupKey, bool isPostData, char *dd_b
int len = 0;
char *pszBackupFileName;
char *dir_name = "db_dumps"; /* default directory */
instid = g_role; /* dispatch node */
segid = g_sourceDBID;
memset(szFileNamePrefix, 0, (1+PATH_NAME_MAX));
if (dd_boost_dir)
snprintf(szFileNamePrefix, 1 + PATH_NAME_MAX, "%s/%sgp_dump_%d_%d_", dd_boost_dir, DUMP_PREFIX, instid, segid);
else
else
snprintf(szFileNamePrefix, 1 + PATH_NAME_MAX, "%s/%sgp_dump_%d_%d_", dir_name, DUMP_PREFIX, instid, segid);
/* Now add up the length of the pieces */
......@@ -1602,7 +1616,7 @@ static char *formDDBoostFileName(char *pszBackupKey, bool isPostData, char *dd_b
}
memset(pszBackupFileName, 0, len + 1 );
strcat(pszBackupFileName, szFileNamePrefix);
strcat(pszBackupFileName, pszBackupKey);
......
......@@ -640,7 +640,7 @@ void test__formDDBoostPsqlCommandLine1(void **state)
formDDBoostPsqlCommandLine(&cmdLine, compUsed, ddboostPg, compProg,
ddp_file_name, dd_boost_buf_size,
filter_script, table_filter_file,
role, psqlPg, postSchemaOnly, NULL, NULL);
role, psqlPg, postSchemaOnly, NULL, NULL, NULL);
char *e = "ddboostPg --readFile --from-file=ddb_filename.gz --dd_boost_buf_size=512MB | gzip -c | filter.py -t filter.conf | psql";
assert_string_equal(cmdLine, e);
......@@ -663,7 +663,7 @@ void test__formDDBoostPsqlCommandLine2(void **state)
formDDBoostPsqlCommandLine(&cmdLine, compUsed, ddboostPg, compProg,
ddp_file_name, dd_boost_buf_size,
NULL, NULL,
role, psqlPg, postSchemaOnly, NULL, NULL);
role, psqlPg, postSchemaOnly, NULL, NULL, NULL);
char *e = "ddboostPg --readFile --from-file=ddb_filename.gz --dd_boost_buf_size=512MB | gzip -c | psql";
printf("cmdLine is %s", cmdLine);
......@@ -690,7 +690,7 @@ void test__formDDBoostPsqlCommandLine3(void **state)
formDDBoostPsqlCommandLine(&cmdLine, compUsed, ddboostPg, compProg,
ddp_file_name, dd_boost_buf_size,
filter_script, table_filter_file,
role, psqlPg, postSchemaOnly, NULL, NULL);
role, psqlPg, postSchemaOnly, NULL, NULL, NULL);
char *e = "ddboostPg --readFile --from-file=ddb_filename --dd_boost_buf_size=512MB | filter.py -t filter.conf | psql";
......@@ -714,7 +714,7 @@ void test__formDDBoostPsqlCommandLine4(void **state)
formDDBoostPsqlCommandLine(&cmdLine, compUsed, ddboostPg, compProg,
ddp_file_name, dd_boost_buf_size,
NULL, NULL,
role, psqlPg, postSchemaOnly, NULL, NULL);
role, psqlPg, postSchemaOnly, NULL, NULL, NULL);
char *e = "ddboostPg --readFile --from-file=ddb_filename --dd_boost_buf_size=512MB | psql";
assert_string_equal(cmdLine, e);
......@@ -739,7 +739,7 @@ void test__formDDBoostPsqlCommandLine5(void **state)
formDDBoostPsqlCommandLine(&cmdLine, compUsed, ddboostPg, compProg,
ddp_file_name, dd_boost_buf_size,
filter_script, table_filter_file,
role, psqlPg, postSchemaOnly, NULL, NULL);
role, psqlPg, postSchemaOnly, NULL, NULL, NULL);
char *e = "ddboostPg --readFile --from-file=ddb_filename.gz --dd_boost_buf_size=512MB | gzip -c | filter.py -m -t filter.conf | psql";
assert_string_equal(cmdLine, e);
......@@ -762,7 +762,7 @@ void test__formDDBoostPsqlCommandLine6(void **state)
formDDBoostPsqlCommandLine(&cmdLine, compUsed, ddboostPg, compProg,
ddp_file_name, dd_boost_buf_size,
NULL, NULL,
role, psqlPg, postSchemaOnly, NULL, NULL);
role, psqlPg, postSchemaOnly, NULL, NULL, NULL);
char *e = "ddboostPg --readFile --from-file=ddb_filename.gz --dd_boost_buf_size=512MB | gzip -c | psql";
assert_string_equal(cmdLine, e);
......@@ -787,7 +787,7 @@ void test__formDDBoostPsqlCommandLine7(void **state)
formDDBoostPsqlCommandLine(&cmdLine, compUsed, ddboostPg, compProg,
ddp_file_name, dd_boost_buf_size,
filter_script, table_filter_file,
role, psqlPg, postSchemaOnly, NULL, NULL);
role, psqlPg, postSchemaOnly, NULL, NULL, NULL);
char *e = "ddboostPg --readFile --from-file=ddb_filename --dd_boost_buf_size=512MB | filter.py -m -t filter.conf | psql";
assert_string_equal(cmdLine, e);
......@@ -810,7 +810,7 @@ void test__formDDBoostPsqlCommandLine8(void **state)
formDDBoostPsqlCommandLine(&cmdLine, compUsed, ddboostPg, compProg,
ddp_file_name, dd_boost_buf_size,
NULL, NULL,
role, psqlPg, postSchemaOnly, NULL, NULL);
role, psqlPg, postSchemaOnly, NULL, NULL, NULL);
char *e = "ddboostPg --readFile --from-file=ddb_filename --dd_boost_buf_size=512MB | psql";
assert_string_equal(cmdLine, e);
......@@ -836,7 +836,7 @@ void test__formDDBoostPsqlCommandLine9(void **state)
formDDBoostPsqlCommandLine(&cmdLine, compUsed, ddboostPg, compProg,
ddp_file_name, dd_boost_buf_size,
filter_script, table_filter_file,
role, psqlPg, postSchemaOnly, change_schema_file, NULL);
role, psqlPg, postSchemaOnly, change_schema_file, NULL, NULL);
char *e = "ddboostPg --readFile --from-file=ddb_filename.gz --dd_boost_buf_size=512MB | gzip -c | filter.py -m -t filter.conf -c /tmp/change_schema_file | psql";
assert_string_equal(cmdLine, e);
......@@ -862,7 +862,7 @@ void test__formDDBoostPsqlCommandLine10(void **state)
formDDBoostPsqlCommandLine(&cmdLine, compUsed, ddboostPg, compProg,
ddp_file_name, dd_boost_buf_size,
filter_script, table_filter_file,
role, psqlPg, postSchemaOnly, NULL, schema_level_file);
role, psqlPg, postSchemaOnly, NULL, schema_level_file, NULL);
char *e = "ddboostPg --readFile --from-file=ddb_filename.gz --dd_boost_buf_size=512MB | gzip -c | filter.py -m -t filter.conf -s /tmp/schema_level_file | psql";
assert_string_equal(cmdLine, e);
......@@ -889,7 +889,7 @@ void test__formDDBoostPsqlCommandLine11(void **state)
formDDBoostPsqlCommandLine(&cmdLine, compUsed, ddboostPg, compProg,
ddp_file_name, dd_boost_buf_size,
filter_script, table_filter_file,
role, psqlPg, postSchemaOnly, NULL, schema_level_file);
role, psqlPg, postSchemaOnly, NULL, schema_level_file, NULL);
char *e = "ddboostPg --readFile --from-file=ddb_filename.gz --dd_boost_buf_size=512MB | gzip -c | filter.py -t filter.conf -s /tmp/schema_level_file | psql";
assert_string_equal(cmdLine, e);
......@@ -915,13 +915,41 @@ void test__formDDBoostPsqlCommandLine12(void **state)
formDDBoostPsqlCommandLine(&cmdLine, compUsed, ddboostPg, compProg,
ddp_file_name, dd_boost_buf_size,
filter_script, table_filter_file,
role, psqlPg, postSchemaOnly, change_schema_file, NULL);
role, psqlPg, postSchemaOnly, change_schema_file, NULL, NULL);
char *e = "ddboostPg --readFile --from-file=ddb_filename.gz --dd_boost_buf_size=512MB | gzip -c | filter.py -t filter.conf -c /tmp/change_schema_file | psql";
assert_string_equal(cmdLine, e);
free(cmdLine);
}
/*
 * Verify that a non-NULL storage unit is forwarded by
 * formDDBoostPsqlCommandLine() as "--ddboost-storage-unit=<name>",
 * placed after the --from-file argument and before --dd_boost_buf_size.
 *
 * Fix over the original: removed the unused local 'inputFileSpec'
 * (dead variable copied from a neighboring test).
 */
void test__formDDBoostPsqlCommandLine13_with_storage_unit(void **state)
{
	char *cmdLine = calloc(1000000, 1);
	bool compUsed = true;
	const char* compProg = "gzip -c";
	int role = ROLE_SEGDB;
	const char* filter_script = "filter.py";
	const char* table_filter_file = "filter.conf";
	const char* psqlPg = "psql";
	const char* ddboostPg = "ddboostPg";
	const char* ddp_file_name = "ddb_filename";
	const char* dd_boost_buf_size = "512MB";
	bool postSchemaOnly = false;
	const char* change_schema_file = "/tmp/change_schema_file";
	const char* ddboost_storage_unit = "foo";

	formDDBoostPsqlCommandLine(&cmdLine, compUsed, ddboostPg, compProg,
							ddp_file_name, dd_boost_buf_size,
							filter_script, table_filter_file,
							role, psqlPg, postSchemaOnly, change_schema_file, NULL,
							ddboost_storage_unit);

	/* Compression appends ".gz" to the source file name; storage unit follows. */
	char *e = "ddboostPg --readFile --from-file=ddb_filename.gz --ddboost-storage-unit=foo --dd_boost_buf_size=512MB | gzip -c | filter.py -t filter.conf -c /tmp/change_schema_file | psql";
	assert_string_equal(cmdLine, e);

	free(cmdLine);
}
void test__formDDBoostPsqlCommandLine_with_postSchemaOnly_and_master_role1(void **state)
{
char *cmdLine = calloc(1000000, 1);
......@@ -941,7 +969,7 @@ void test__formDDBoostPsqlCommandLine_with_postSchemaOnly_and_master_role1(void
formDDBoostPsqlCommandLine(&cmdLine, compUsed, ddboostPg, compProg,
ddp_file_name, dd_boost_buf_size,
postDataFilterScript, tableFilterFile,
role, psqlPg, postSchemaOnly, change_schema_file, NULL);
role, psqlPg, postSchemaOnly, change_schema_file, NULL, NULL);
char *e = "ddboostPg --readFile --from-file=ddb_filename --dd_boost_buf_size=512MB | gprestore_post_data_filter.py -t tablefilter -c /tmp/change_schema_file | psql";
assert_string_equal(cmdLine, e);
......@@ -967,7 +995,7 @@ void test__formDDBoostPsqlCommandLine_with_postSchemaOnly_and_master_role2(void
formDDBoostPsqlCommandLine(&cmdLine, compUsed, ddboostPg, compProg,
ddp_file_name, dd_boost_buf_size,
postDataFilterScript, tableFilterFile,
role, psqlPg, postSchemaOnly, NULL, schema_level_file);
role, psqlPg, postSchemaOnly, NULL, schema_level_file, NULL);
char *e = "ddboostPg --readFile --from-file=ddb_filename.gz --dd_boost_buf_size=512MB | gzip -c | gprestore_post_data_filter.py -t tablefilter -s /tmp/schema_level_file | psql";
assert_string_equal(cmdLine, e);
......@@ -992,7 +1020,7 @@ void test__formDDBoostPsqlCommandLine_with_postSchemaOnly_and_segment_role1(void
formDDBoostPsqlCommandLine(&cmdLine, compUsed, ddboostPg, compProg,
ddp_file_name, dd_boost_buf_size,
postDataFilterScript, tableFilterFile,
role, psqlPg, postSchemaOnly, change_schema_file, NULL);
role, psqlPg, postSchemaOnly, change_schema_file, NULL, NULL);
char *e = "ddboostPg --readFile --from-file=ddb_filename --dd_boost_buf_size=512MB | gprestore_post_data_filter.py -t tablefilter -c /tmp/change_schema_file | psql";
assert_string_equal(cmdLine, e);
......@@ -1019,7 +1047,7 @@ void test__formDDBoostPsqlCommandLine_with_postSchemaOnly_and_segment_role2(void
formDDBoostPsqlCommandLine(&cmdLine, compUsed, ddboostPg, compProg,
ddp_file_name, dd_boost_buf_size,
postDataFilterScript, tableFilterFile,
role, psqlPg, postSchemaOnly, NULL, schema_level_file);
role, psqlPg, postSchemaOnly, NULL, schema_level_file, NULL);
char *e = "ddboostPg --readFile --from-file=ddb_filename.gz --dd_boost_buf_size=512MB | gzip -c | gprestore_post_data_filter.py -t tablefilter -s /tmp/schema_level_file | psql";
assert_string_equal(cmdLine, e);
......@@ -1482,6 +1510,7 @@ main(int argc, char* argv[])
unit_test(test__formDDBoostPsqlCommandLine10),
unit_test(test__formDDBoostPsqlCommandLine11),
unit_test(test__formDDBoostPsqlCommandLine12),
unit_test(test__formDDBoostPsqlCommandLine13_with_storage_unit),
unit_test(test__formDDBoostPsqlCommandLine_with_postSchemaOnly_and_master_role1),
unit_test(test__formDDBoostPsqlCommandLine_with_postSchemaOnly_and_master_role2),
unit_test(test__formDDBoostPsqlCommandLine_with_postSchemaOnly_and_segment_role1),
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册