提交 f8488849 编写于 作者: D Daniel Gustafsson

Remove deprecated functionality and leftovers from removed utils

This removes all utilities which are known to be deprecated from
gpMgmt and it also clears out any command line switches marked as
deprecated along with code marked deprecated. It also removes the
leftovers from previously removed utilities such as gpsuspend and
gpcheck_hostdump etc and fixes mentions of renamed utilities.

There should be no functional change from this commit as the only
removed utils have been marked deprecated for some time.
上级 1eeea564
......@@ -925,7 +925,6 @@ PVK_SCRIPTS = \
bin/generate_load_tpch.pl \
bin/gpscp \
bin/gpssh \
bin/gpcheckos \
bin/gpcheckperf \
bin/run_operator_tests.pl \
$(NULL)
......@@ -960,9 +959,8 @@ SET_VERSION_SCRIPTS = \
bin/gpactivatestandby \
bin/gpaddmirrors \
bin/gpbitmapreindex \
bin/gpcheckos \
bin/gpcheckperf \
bin/gpcrondump.py \
bin/gpcrondump \
bin/gpdbrestore \
bin/gpdeletesystem \
bin/gpexpand \
......@@ -974,12 +972,9 @@ SET_VERSION_SCRIPTS = \
bin/gpmigrator \
bin/gpmigrator_mirror \
bin/gpmovemirrors \
bin/gprebuildsystem \
bin/gprecoverseg \
bin/gpreload \
bin/gpscp \
bin/gpsizecalc \
bin/gpskew \
bin/gpssh \
bin/gpssh-exkeys \
bin/gpstart \
......@@ -989,7 +984,6 @@ SET_VERSION_SCRIPTS = \
bin/lib/gpcheckcat \
sbin/gpaddconfig.py \
sbin/gpchangeuserpassword \
sbin/gpcheck_hostdump \
sbin/gpcleansegmentdir.py \
sbin/gpfixuserlimts \
sbin/gpgetstatususingtransition.py \
......@@ -998,7 +992,6 @@ SET_VERSION_SCRIPTS = \
sbin/gpsegstop.py \
sbin/gpsegtoprimaryormirror.py \
sbin/gpsetdbid.py \
sbin/gpsuspend.py \
sbin/gpupgrademirror.py \
lib/python/gppylib/programs/clsAddMirrors.py \
lib/python/gppylib/programs/clsHostCacheLookup.py \
......@@ -1007,7 +1000,6 @@ SET_VERSION_SCRIPTS = \
lib/python/gppylib/programs/clsSystemState.py \
lib/python/gppylib/programs/gppkg.py \
lib/python/gppylib/programs/kill.py \
lib/python/gppylib/programs/verify.py \
lib/python/gppylib/mainUtils.py \
$(NULL)
......@@ -1135,8 +1127,6 @@ endif
rm -rf $(INSTLOC)/bin/lib/.p4ignore
rm -rf $(INSTLOC)/bin/src
rm -f $(INSTLOC)/bin/gpchecksubnetcfg
echo "`date` -- INFO: Removing $(INSTLOC)/bin/gpexpandsystem"
rm -f $(INSTLOC)/bin/gpexpandsystem
rm -rf $(INSTLOC)/bin/gppylib
find $(INSTLOC)/lib/python/gppylib -name test -type d | xargs rm -rf
......
......@@ -7,14 +7,12 @@ bin/generate_load_tpch.pl
bin/gp
bin/gpaddmirrors.py
bin/gpdbupgrade
bin/gpexpandsystem
bin/gpfaultinjector
bin/gpha
bin/gpma
bin/gppaxos
bin/gpprintdbsizes
bin/gprecoverseg.py
bin/gpsuspend
bin/gptorment.pl
bin/gpugcluster
bin/lib/gpgetconfig.py
......@@ -26,7 +24,5 @@ bin/run_operator_tests.pl
bin/test_fsync
bin/gp_filedump
docs/contrib/README.gp_filedump
docs/cli_help/gpsuspend_help
lib/python/gppylib/programs/clsInjectFault.py
sbin/gpsuspend.py
......@@ -9,9 +9,8 @@ SET_VERSION_SCRIPTS = \
bin/gpactivatestandby \
bin/gpaddmirrors \
bin/gpbitmapreindex \
bin/gpcheckos \
bin/gpcheckperf \
bin/gpcrondump.py \
bin/gpcrondump \
bin/gpdbrestore \
bin/gpdeletesystem \
bin/gpexpand \
......@@ -23,12 +22,9 @@ SET_VERSION_SCRIPTS = \
bin/gpmigrator \
bin/gpmigrator_mirror \
bin/gpmovemirrors \
bin/gprebuildsystem \
bin/gprecoverseg \
bin/gpreload \
bin/gpscp \
bin/gpsizecalc \
bin/gpskew \
bin/gpssh \
bin/gpssh-exkeys \
bin/gpstart \
......@@ -38,7 +34,6 @@ SET_VERSION_SCRIPTS = \
bin/gpcheckcat \
sbin/gpaddconfig.py \
sbin/gpchangeuserpassword \
sbin/gpcheck_hostdump \
sbin/gpcleansegmentdir.py \
sbin/gpfixuserlimts \
sbin/gpgetstatususingtransition.py \
......@@ -47,7 +42,6 @@ SET_VERSION_SCRIPTS = \
sbin/gpsegstop.py \
sbin/gpsegtoprimaryormirror.py \
sbin/gpsetdbid.py \
sbin/gpsuspend.py \
sbin/gpupgrademirror.py \
lib/python/gppylib/programs/clsAddMirrors.py \
lib/python/gppylib/programs/clsHostCacheLookup.py \
......@@ -56,7 +50,6 @@ SET_VERSION_SCRIPTS = \
lib/python/gppylib/programs/clsSystemState.py \
lib/python/gppylib/programs/gppkg.py \
lib/python/gppylib/programs/kill.py \
lib/python/gppylib/programs/verify.py \
lib/python/gppylib/mainUtils.py \
$(NULL)
......@@ -122,7 +115,5 @@ install: generate_greenplum_path_file
rm -rf $(prefix)/bin/Makefile
rm -rf $(prefix)/bin/src
rm -f $(prefix)/bin/gpchecksubnetcfg
echo "`date` -- INFO: Removing $(prefix)/bin/gpexpandsystem"
rm -f $(prefix)/bin/gpexpandsystem
rm -rf $(prefix)/bin/gppylib
find $(prefix)/lib/python/gppylib -name test -type d | xargs rm -rf
......@@ -34,18 +34,14 @@ run_operator_tests.pl - ???
List of Management Scripts Written in Bash
------------------------------------------
bin/gpcheckos.bash - Like gpcheckos, but in bash
bin/gpcrondump - Dumps a database
bin/gpdbrestore - Restores a database dumped from gpcrondump
bin/gpinitsystem - Creates a new Greenplum Database
bin/gpload - Sets env variables and calls gpload.py
bin/gpprintdbsizes - Removed in 4.0
bin/gprebuildsystem - Deprecated in 3.4
List of Management Scripts Written in Python (no libraries)
-----------------------------------------------------------
bin/gpdetective - Removed in 4.3.5.1.
bin/gpload.py - Loads data into a Greenplum Database
bin/gpmigrator - Upgrades from previous versions
bin/gpsys1 - Print system information on a host (???)
......@@ -55,7 +51,6 @@ List of Management Scripts Written in Python (gpmlib - old libraries)
---------------------------------------------------------------------
bin/gpaddmirrors - Adds mirrors to an array (needs rewrite)
bin/gprecoverseg - Recovers a failed segment (needs rewrite)
bin/gpchecknet - Checks network performance
bin/gpcheckperf - Checks the hardware for Greenplum Database
bin/gpscp - Copies files to many hosts
bin/gpssh - Remote shell to many hosts
......@@ -66,7 +61,6 @@ List of Management Scripts Written in Python (gppylib - current libraries)
--------------------------------------------------------------------------
bin/gpactivatestandby - Activates the Standby Master
bin/gpaddconfig - Edits postgresql.conf file for all segments
bin/gpcheckos - Check operating system settings
bin/gpdeletesystem - Deletes a Greenplum Database
bin/gpexpand - Adds additional segments to a Greenplum Database
bin/gpfilespace - Adds a filespace to a Greenplum Database (partial use of libraries)
......@@ -83,9 +77,6 @@ sbin/gpsegcopy - Helper script for gpexpand
sbin/gpsegstart.py - Helper script for gpstart
sbin/gpsegstop.py - Helper script for gpstop
sbin/gpsegtoprimaryormirror.py - Helper script for failover
sbin/gpstandbystart.sh - Removed in 4.0
sbin/gpage.py - Removed in 4.0 due to query prioritization
sbin/gpsuspend.py - Unused Helper script for ??? (unused)
Overview of gppylib
......@@ -103,15 +94,6 @@ gparray.py
+- GpArray - Configuration information for a Greenplum Database
\- Contains multiple GpSegment objects
gpcheckos.py - Old dead code? Not called by gpcheckos.
stringUtil.py - weird gpcheckos foo ???
commands/gpcheckosUnix.py - commands for gpcheckos
gpcheckosCmd.py - Should move to commands/gp.py
gpcheckosDa.py - ???
gpcheckosXml.py - Should be modified to be less gpcheckos specific
|
+- GpXml - mostly generic XML file wrapper
gphostcache.py
|
+- GpHost - Information about a single Host
......
此差异已折叠。
......@@ -78,8 +78,6 @@ class GpCronDump(Operation):
self.context.dump_database = self.context.dump_databases[0]
self.include_email_file = options.include_email_file
if options.report_dir:
logger.warn("-y is a deprecacted option. Report files are always generated with the backup set.")
if self.context.incremental and len(self.context.dump_databases) == 0:
raise ExceptionNoStackTraceNeeded("Must supply -x <database name> with incremental option")
......@@ -1384,8 +1382,6 @@ def create_parser():
addTo.add_option('-u', dest='backup_dir', metavar="<BACKUPFILEDIR>",
help="Directory where backup files are placed [default: data directories]")
addTo.add_option('-y', dest='report_dir', metavar="<REPORTFILEDIR>",
help="DEPRECATED OPTION: Directory where report file is placed")
addTo.add_option('-E', dest='encoding', metavar="<encoding>",
help="Dump the data under the given encoding")
addTo.add_option('--clean', const='--clean', action='append_const', dest='output_options',
......
......@@ -2808,8 +2808,8 @@ System Expansion is used to add segments to an existing GPDB array.
gpexpand did not detect a System Expansion that is in progress.
Before initiating a System Expansion, you need to provision and burn-in
the new hardware. Please be sure to run gpcheckperf/gpcheckos to make
sure the new hardware is working properly.
the new hardware. Please be sure to run gpcheckperf to make sure the
new hardware is working properly.
Please refer to the Admin Guide for more information."""
......
......@@ -1111,7 +1111,7 @@ class GpMfr(Operation):
"""
gplog dumps logs on stdout. In order to show progress bars correctly,
we don't want these logs on stdout. If this class is used by other
Python modules (e.g. gpcrondump.py), we must provide a way to restore
Python modules (e.g. gpcrondump), we must provide a way to restore
log level after we are done.
"""
self.originalStdoutLog = gplog._SOUT_HANDLER.level
......
......@@ -1223,17 +1223,6 @@ class GpCatVersionDirectory(Command):
cmd.run(validateAfter=True)
return cmd.get_version()
#-----------------------------------------------
class GpSuspendSegmentsOnHost(Command):
def __init__(self, name, gpconfigstrings, resume, ctxt=LOCAL, remoteHost=None):
if resume:
pauseOrResume = "--resume"
else:
pauseOrResume = "--pause"
cmdStr="echo '%s' | $GPHOME/sbin/gpsuspend.py %s" % (gpconfigstrings, pauseOrResume)
Command.__init__(self,name,cmdStr,ctxt,remoteHost)
#-----------------------------------------------
class GpAddConfigScript(Command):
def __init__(self, name, directorystring, entry, value=None, removeonly=False, ctxt=LOCAL, remoteHost=None):
......
......@@ -864,8 +864,7 @@ def createSegmentRows( hostlist
return rows
elif mirror_type.lower().strip() == 'spread':
#TODO: must be sure to put mirrors on a different subnet than primary.
# this is a general problem for GPDB these days. perhaps we should
# add something to gpdetective to be able to detect this and fix it.
# this is a general problem for GPDB these days.
# best to have the interface mapping stuff 1st.
content=0
isprimary='f'
......@@ -1506,7 +1505,7 @@ class GpArray:
(called by gpexpand.)
Note: Currently this is only used by the gpexpand rollback facility,
and by gpsuspend utility,
and by gpmigrator utility,
there is currently NO expectation that this file format is saved
on disk in any long term fashion.
......
......@@ -255,56 +255,6 @@ class Gppkg:
return gppkg
class LocalCommand(Operation):
'''
DEPRECATED
TODO: AK: Eliminate this. Replace invocations with Command(...).run(validateAfter = True)
'''
def __init__(self, cmd_str, echo = False):
self.cmd_str = cmd_str
self.echo = echo
def execute(self):
logger.debug(self.cmd_str)
cmd = Command(name = 'LocalCommand', cmdStr = self.cmd_str)
cmd.run(validateAfter = True)
if self.echo:
echo_str = cmd.get_results().stdout.strip()
if echo_str:
logger.info(echo_str)
return cmd.get_results()
class RemoteCommand(Operation):
"""
DEPRECATED
TODO: AK: Rename as GpSsh, like GpScp below.
"""
def __init__(self, cmd_str, host_list):
self.cmd_str = cmd_str
self.host_list = host_list
self.pool = None
def execute(self):
logger.debug(self.cmd_str)
# Create Worker pool
# and add commands to it
self.pool = WorkerPool()
for host in self.host_list:
cmd = Command(name = 'Remote Command', cmdStr = self.cmd_str, ctxt = REMOTE, remoteHost = host)
self.pool.addCommand(cmd)
self.pool.join()
#This will raise ExecutionError exception if even a single command fails
self.pool.check_results()
class ListPackages(Operation):
'''
Lists all the packages present in
......
#!/bin/bash
# Deprecation stub for the retired gpsizecalc utility: logs a pointer to the
# gp_size_of_* views in gp_toolkit and exits with status 1.
SCRIPT_DIR=$(dirname $0)
FUNCTIONS=$SCRIPT_DIR/lib/gp_bash_functions.sh
# Guard clause: bail out early if the shared bash function library is missing,
# since LOG_MSG and the command variables below all come from it.
if [ ! -f $FUNCTIONS ];then
echo "FATAL:-Cannot source $FUNCTIONS"
exit 2;
fi
. $FUNCTIONS
# Ask the installed postgres binary for the GPDB version string (field 4 on).
CURRENT_VERSION=$($EXPORT_GPHOME; $EXPORT_LIB_PATH; $GPHOME/bin/postgres --gp-version | $CUT -d " " -f 4-)
LOG_MSG "[INFO]:-The gpsizecalc utility has been deprecated in Greenplum Database $CURRENT_VERSION." 0
LOG_MSG "[INFO]:-Use the gp_size_of_* family of views in the Greenplum Administrative Schema (gp_toolkit) " 0
LOG_MSG "[INFO]:-to check the sizing of a database, schema, or relation." 0
LOG_MSG "[INFO]:-Exiting..." 0
exit 1
#!/bin/bash
# Deprecation stub for the retired gpskew utility: logs a pointer to the
# gp_skew_* views in gp_toolkit and exits with status 1.
SCRIPT_DIR=$(dirname $0)
FUNCTIONS=$SCRIPT_DIR/lib/gp_bash_functions.sh
# Guard clause: bail out early if the shared bash function library is missing,
# since LOG_MSG and the command variables below all come from it.
if [ ! -f $FUNCTIONS ];then
echo "FATAL:-Cannot source $FUNCTIONS"
exit 2;
fi
. $FUNCTIONS
# Ask the installed postgres binary for the GPDB version string (field 4 on).
CURRENT_VERSION=$($EXPORT_GPHOME; $EXPORT_LIB_PATH; $GPHOME/bin/postgres --gp-version | $CUT -d " " -f 4-)
LOG_MSG "[INFO]:-The gpskew utility has been deprecated in Greenplum Database $CURRENT_VERSION." 0
LOG_MSG "[INFO]:-Use the gp_skew_* family of views in the Greenplum Administrative Schema (gp_toolkit) " 0
LOG_MSG "[INFO]:-to check the data distribution skew of a table." 0
LOG_MSG "[INFO]:-Exiting..." 0
exit 1
......@@ -184,10 +184,7 @@ GPCRONDUMP=$SCRIPTDIR/gpcrondump
GPDELETESYSTEM=$SCRIPTDIR/gpdeletesystem
GPINITSTANDBY=$SCRIPTDIR/gpinitstandby
GPREBUILDCLUSTER=$SCRIPTDIR/gprebuildcluster
GPREBUILDSEG=$SCRIPTDIR/gprebuildseg
GPRECOVERSEG=$SCRIPTDIR/gprecoverseg
GPSIZECALC=$SCRIPTDIR/gpsizecalc
GPSKEW=$SCRIPTDIR/gpskew
GPSTART=$SCRIPTDIR/gpstart
GPSTATE=$SCRIPTDIR/gpstate
GPSTOP=$SCRIPTDIR/gpstop
......
#!/bin/bash
# Filename:- gpsegsize.sh
# Version:- $Revision$
# Updated:- $Date$
# Status:- Released
# Author:- G Coombe
# Contact:- gcoombe@greenplum.com
# Release date:- Dec 2006
# Release stat:- Released
# Copyright (c) Metapa 2005. All Rights Reserved.
# Copyright (c) 2007 Greenplum Inc
#******************************************************************************
# Update History
#******************************************************************************
# Ver Date Who Update
#******************************************************************************
# Detailed Description
#******************************************************************************
# Per-segment worker spawned in parallel by gpsizecalc (see USAGE below).
# Depending on the TYPE argument it measures the on-disk size of a table (1),
# an index (2), or a whole database (3) on one segment instance: it resolves
# the database/namespace/relfilenode OIDs via utility-mode psql against the
# primary, then lists the matching files under the segment's base directory
# over $TRUSTED_SHELL and appends "host|basedir|port|bytes" rows to $TMPFILE.
# Progress is reported to the parent through $PARALLEL_STATUS_FILE
# (COMPLETED:/FAILED:/KILLED: markers).
#******************************************************************************
# Prep Code
WORKDIR=`dirname $0`
# Source required functions file, this required for script to run
# exit if cannot locate this file. Change location of FUNCTIONS variable
# as required.
FUNCTIONS=$WORKDIR/gp_bash_functions.sh
if [ -f $FUNCTIONS ]; then
. $FUNCTIONS
else
echo "[FATAL]:-Cannot source $FUNCTIONS file Script Exits!"
exit 2
fi
#******************************************************************************
# Script Specific Variables
#******************************************************************************
# Log file that will record script actions
CUR_DATE=`$DATE +%Y%m%d`
TIME=`$DATE +%H%M%S`
PROG_NAME=`$BASENAME $0`
# Level of script feedback 0=small 1=verbose
unset VERBOSE
GP_USER=$USER_NAME
EXIT_STATUS=0
#******************************************************************************
# Functions
#******************************************************************************
# USAGE -- print a short usage note and exit. This script is an internal
# helper for gpsizecalc and is not meant to be invoked by hand.
USAGE () {
$ECHO
$ECHO " `basename $0`"
$ECHO
$ECHO " Script called by gpsizecalc, this should not be run directly"
exit $EXIT_STATUS
}
# CHK_CALL -- verify this worker was spawned by the expected parent by
# checking that the parent's status file (<prefix>.$PARENT_PID) exists;
# exits 2 otherwise.
CHK_CALL () {
FILE_PREFIX=`$ECHO $PARALLEL_STATUS_FILE|$CUT -d"." -f1`
if [ ! -f ${FILE_PREFIX}.$PARENT_PID ];then
$ECHO "[FATAL]:-Not called from from correct parent program"
exit 2
fi
}
# SIZE_ERROR_EXIT [what] -- record a zero-byte size row for this segment,
# write a FAILED marker for the parent, log a warning naming $1
# (database/table/index), then exit 1.
# NOTE(review): $QE_LINE is only assigned near the end of Main, so a failure
# before that point writes a bare "FAILED:" marker with an empty line id --
# confirm against the parent's status-file parsing whether that is intended.
SIZE_ERROR_EXIT () {
LOG_MSG "[INFO][$INST_COUNT]:-Start Function $FUNCNAME"
$ECHO "${QE_HOST}|${QE_BASE_DIR}|${QE_PORT}|0" >> $TMPFILE
$ECHO "FAILED:${QE_LINE}" >> $PARALLEL_STATUS_FILE
LOG_MSG "[WARN]:-Failed to process $1 size request"
LOG_MSG "[INFO][$INST_COUNT]:-End Function $FUNCNAME"
exit 1
}
# PING_CHK -- make sure the segment host answers a single ping; on failure
# abort via SIZE_ERROR_EXIT.
# NOTE(review): SIZE_ERROR_EXIT is called here with no argument, so its
# warning reads "Failed to process  size request" -- confirm intended.
PING_CHK () {
PING_HOST $QE_HOST 1
if [ $RETVAL -ne 0 ];then
SIZE_ERROR_EXIT
fi
}
# GET_DATABASE_SIZE -- total the file sizes under <segment dir>/base/<db oid>
# on the remote segment host and append one host|basedir|port|bytes row.
GET_DATABASE_SIZE () {
LOG_MSG "[INFO][$INST_COUNT]:-Start Function $FUNCNAME"
#Get the database OID
DB_OID=`$TRUSTED_SHELL -n $QE_PRIMARY_HOST "${EXPORT_LIB_PATH}; env PGOPTIONS=\"-c gp_session_role=utility\" $PSQL -p $QE_PRIMARY_PORT -d \"$DEFAULTDB\" -A -t -c\"select OID from pg_database where datname='$QD_DBNAME'\""`
QE_BASE_DIR=`$BASENAME $QE_DIR`
QE_DB=${QE_DIR}/base/${DB_OID}
$ECHO ${QE_HOST}"|"${QE_BASE_DIR}"|"${QE_PORT}"|"`$TRUSTED_SHELL $QE_HOST "ls -al ${QE_DB}|$GREP -v '^d'|$GREP -vi total 2>/dev/null" |$AWK '{total += \$5} END {print total}'` >> $TMPFILE
RETVAL=$?
if [ $RETVAL -ne 0 ];then SIZE_ERROR_EXIT database;fi
LOG_MSG "[INFO][$INST_COUNT]:-End Function $FUNCNAME"
}
# GET_TABLE_SIZE -- resolve the table's relfilenode via the primary, then
# size the base relation file and any extent files (<relfilenode>.*) on the
# remote segment; appends one row for the base file and one for the extents.
GET_TABLE_SIZE () {
LOG_MSG "[INFO][$INST_COUNT]:-Start Function $FUNCNAME"
#Get the database OID
DB_OID=`$TRUSTED_SHELL -n $QE_PRIMARY_HOST "${EXPORT_LIB_PATH}; env PGOPTIONS=\"-c gp_session_role=utility\" $PSQL -p $QE_PRIMARY_PORT -d \"$DEFAULTDB\" -A -t -c\"select OID from pg_database where datname='$QD_DBNAME'\""`
#Now get schema OID
SCHEMA_OID=`$TRUSTED_SHELL -n $QE_PRIMARY_HOST "${EXPORT_LIB_PATH}; env PGOPTIONS=\"-c gp_session_role=utility\" $PSQL -p $QE_PRIMARY_PORT -d \"$QD_DBNAME\" -A -t -c\"select OID from pg_namespace where nspname='${SCHEMA_NAME}'\""`
#Now get the table relfilenode
QE_TABLE_OID=`$TRUSTED_SHELL -n $QE_PRIMARY_HOST "${EXPORT_LIB_PATH}; env PGOPTIONS=\"-c gp_session_role=utility\" $PSQL -p $QE_PRIMARY_PORT -d \"$QD_DBNAME\" -A -t -c\"select relfilenode from pg_class where relname='${TMP_TABLE_NAME}' and relnamespace=${SCHEMA_OID};\""`
#Can now progress the size request
QE_TABLE=${QE_DIR}/base/${DB_OID}/${QE_TABLE_OID}
QE_BASE_DIR=`$BASENAME $QE_DIR`
$ECHO ${QE_HOST}"|"${QE_BASE_DIR}"|"${QE_PORT}"|"`$TRUSTED_SHELL $QE_HOST "if [ -f $QE_TABLE ];then ls -al ${QE_TABLE};else $ECHO 0 0 0 0 0;fi"|$TAIL -1|$AWK '{print \$5}'` >> $TMPFILE
RETVAL=$?
if [ $RETVAL -ne 0 ];then SIZE_ERROR_EXIT table;fi
$ECHO ${QE_HOST}"|"${QE_BASE_DIR}"|"${QE_PORT}"|"`$TRUSTED_SHELL $QE_HOST "ls -al ${QE_TABLE}.*|$GREP -v '^d'|$GREP -vi total" 2>/dev/null|$AWK '{total += \$5} END {print total}'` >> $TMPFILE
RETVAL=$?
if [ $RETVAL -ne 0 ];then SIZE_ERROR_EXIT table;fi
LOG_MSG "[INFO][$INST_COUNT]:-End Function $FUNCNAME"
}
# GET_INDEX_SIZE -- resolve the relfilenodes of every index on the table via
# the primary, then size each index file and its extents on the remote
# segment; treats an empty index list as a failure.
GET_INDEX_SIZE () {
LOG_MSG "[INFO][$INST_COUNT]:-Start Function $FUNCNAME"
#Get the database OID
DB_OID=`$TRUSTED_SHELL -n $QE_PRIMARY_HOST "${EXPORT_LIB_PATH}; env PGOPTIONS=\"-c gp_session_role=utility\" $PSQL -p $QE_PRIMARY_PORT -d \"$DEFAULTDB\" -A -t -c\"select OID from pg_database where datname='$QD_DBNAME'\""`
INDEX_ARRAY=(`$TRUSTED_SHELL -n $QE_PRIMARY_HOST "${EXPORT_LIB_PATH};env PGOPTIONS=\"-c gp_session_role=utility\" $PSQL -p $QE_PRIMARY_PORT -d \"$QD_DBNAME\" -A -t -c\"select relfilenode from pg_class where oid in (select indexrelid from pg_index where indrelid in (select oid from pg_class where relname='${TMP_TABLE_NAME}'));\""`)
RETVAL=$?
if [ $RETVAL -ne 0 ] || [ ${#INDEX_ARRAY[@]} -eq 0 ];then
SIZE_ERROR_EXIT index
else
for IDX in ${INDEX_ARRAY[@]}
do
QE_INDEX=${QE_DIR}/base/${DB_OID}/$IDX
QE_BASE_DIR=`$BASENAME $QE_DIR`
$ECHO ${QE_HOST}"|"${QE_BASE_DIR}"|"${QE_PORT}"|"`$TRUSTED_SHELL $QE_HOST "ls -al ${QE_INDEX}"|$TAIL -1|$AWK '{print \$5}'` >> $TMPFILE
$ECHO ${QE_HOST}"|"${QE_BASE_DIR}"|"${QE_PORT}"|"`$TRUSTED_SHELL $QE_HOST "ls -al ${QE_INDEX}.*|$GREP -v '^d'|$GREP -vi total" 2>/dev/null|$AWK '{total += \$5} END {print total}'` >> $TMPFILE
done
fi
LOG_MSG "[INFO][$INST_COUNT]:-End Function $FUNCNAME"
}
#******************************************************************************
# Main Section
#******************************************************************************
# On INT/TERM, mark this worker as KILLED in the status file before exiting.
trap '$ECHO "KILLED|${QE_LINE}" >> $PARALLEL_STATUS_FILE;ERROR_EXIT "[FATAL]:-[$INST_COUNT]-Recieved INT or TERM signal" 2' INT TERM
while getopts ":v'?'" opt
do
case $opt in
v ) VERSION_INFO ;;
'?' ) USAGE ;;
* ) USAGE
esac
done
#Now process supplied call parameters
PARENT_PID=$1;shift #PID of the calling parent process (gpsizecalc per USAGE; validated by CHK_CALL)
CHK_CALL
QE_HOST=$1;shift #Hostname holding database segment
QE_PORT=$1;shift #Segment port
QE_DIR=$1;shift #Segment directory
INST_COUNT=$1;shift #Unique number for this parallel script, starts at 0
LOG_FILE=$1;shift
TABLE_NAME=$1;shift #May be schema-qualified ("schema.table"); split below
QD_DBNAME="$1";shift
TYPE=$1;shift #Size request kind: 1=table 2=index 3=database (see case below)
TMPFILE=$1;shift #Shared output file collecting host|basedir|port|bytes rows
# NOTE(review): QD_DBNAME is assigned twice (argument 8 and argument 11); the
# second assignment wins. Confirm the caller really passes the database name
# in both positions before simplifying.
QD_DBNAME="$1";shift
QE_PRIMARY_HOST=$1;shift # the host name of the primary
QE_PRIMARY_PORT=$1;shift # the port of the primary
QE_PRIMARY_DIR=$1;shift # the dir of the primary
QE_PRIMARY_BASE_DIR=`$BASENAME $QE_PRIMARY_DIR`
LOG_MSG "[INFO][$INST_COUNT]:-Start Main"
# Split "schema.table" into its parts (field 1 = schema, field 2 = table).
SCHEMA_NAME=`$ECHO $TABLE_NAME|$CUT -d"." -f1`
TMP_TABLE_NAME=`$ECHO $TABLE_NAME|$CUT -d"." -f2`
PING_CHK
case $TYPE in
1 ) GET_TABLE_SIZE ;;
2 ) GET_INDEX_SIZE ;;
3 ) GET_DATABASE_SIZE ;;
esac
QE_LINE="${QE_HOST}:${QE_PORT}:${QE_DIR}"
$ECHO "COMPLETED:${QE_LINE}" >> $PARALLEL_STATUS_FILE
LOG_MSG "[INFO][$INST_COUNT]:-End Main"
exit $EXIT_STATUS
......@@ -318,4 +318,4 @@ gp_dump -n myschema mydatabase
SEE ALSO
*****************************************************
gp_restore, gprebuildsystem, gprebuildseg
gp_restore
COMMAND NAME: gp_restore
Restores Greenplum databases that were backed up using gp_dump.
The gp_restore utility is deprecated and will be removed in a future
release. Use gpcrondump and gpdbrestore to backup and restore Greenplum
databases.
*****************************************************
SYNOPSIS
*****************************************************
gp_restore --gp-k=<timestamp_key> -d <database_name> [-i] [-v] [-a | -s]
[-h <hostname> ] [-p <port>] [-U <username>] [-W] [--gp-c] [--gp-i]
[--gp-d=<directoryname>] [--gp-r=<reportfile>] [--gp-l=dbid [, ...]]
gp_restore -? | -h | --help
gp_restore --version
*****************************************************
DESCRIPTION
*****************************************************
The gp_restore utility recreates the data definitions (schema) and user
data in a Greenplum database using the script files created by an
gp_dump operation. The use of this utility assumes:
1. You have backup files created by an gp_dump operation.
2. Your Greenplum Database system is up and running.
3. Your Greenplum Database system has the exact same number of segment
instances (primary and mirror) as the system that was backed up using
gp_dump.
4. (optional) The gp_restore utility uses the information in the
Greenplum system catalog tables to determine the hosts, ports, and
data directories for the segment instances it is restoring. If you want
to change any of this information (for example, move the system to a
different array of hosts) you must use the gprebuildsystem and
gprebuildseg scripts to reconfigure your array before restoring.
5. The databases you are restoring have been created in the system.
6. If you used the options -s (schema only), -a (data only), --gp-c
(compressed), --gp-d (alternate dump file location) when performing
the gp_dump operation, you must specify these options when doing the
gp_restore as well.
The functionality of gp_restore is analogous to the PostgreSQL pg_restore
utility, which restores a database from files created by the database
backup process. It issues the commands necessary to reconstruct the
database to the state it was in at the time it was saved.
The functionality of gp_restore is modified to accommodate the
distributed nature of Greenplum Database, and to use files created by
an gp_dump operation. Keep in mind that a database in Greenplum is
actually comprised of several PostgreSQL database instances (the master
and all segments), each of which must be restored individually. The
gp_restore utility takes care of populating each segment in the system
with its own distinct portion of data.
NOTE: The gp_dump utility creates a dump file in the master data
directory named gp_dump_1_<dbid>_<timestamp>_post_data that contains
commands to rebuild objects associated with the tables. When the
database is restored with gp_restore, first, the schema and data are
restored, and then, the dump file is used to rebuilt the other objects
associated with the tables.
The gp_restore utility performs the following actions:
ON THE MASTER HOST
* Creates the user database schema(s) using the
gp_dump_1_<dbid>_<timestamp> SQL file created by gp_dump.
* Creates a log file in the master data directory named
gp_restore_status_1_<dbid>_<timestamp>.
* gp_restore launches a gp_restore_agent for each segment instance to
be restored. gp_restore_agent processes run on the segment hosts and
report status back to the gp_restore process running on the master host.
ON THE SEGMENT HOSTS
* Restores the user data for each segment instance using the
gp_dump_0_<dbid>_<timestamp> files created by gp_dump. Each segment
instance on a host (primary and mirror instances) are restored.
* Creates a log file for each segment instance named
gp_restore_status_0_<dbid>_<timestamp>.
The 14 digit timestamp is the number that uniquely identifies the backup
job to be restored, and is part of the filename for each dump file
created by a gp_dump operation. This timestamp must be passed to the
gp_restore utility when restoring a Greenplum Database.
NOTE: The restore status files are stored under the db_dumps/<date>
directory.
After the data in the tables is restored, check the report status files
to verify that there no errors.
*****************************************************
OPTIONS
*****************************************************
--gp-k=<timestamp_key>
Required. The 14 digit timestamp key that uniquely identifies the backup
set of data to restore. This timestamp can be found in the gp_dump log
file output, as well as at the end of the dump files created by a
gp_dump operation. It is of the form YYYYMMDDHHMMSS.
-d <database_name> | --dbname=<dbname>
Required. The name of the database to connect to in order to restore the
user data. The database(s) you are restoring must exist, gp_restore does
not create the database.
-i | --ignore-version
Ignores a version mismatch between gp_restore and the database server.
-v | --verbose
Specifies verbose mode.
-a | --data-only
Restore only the data, not the schema (data definitions).
-s | --schema-only
Restores only the schema (data definitions), no user data is restored.
-h <hostname> | --host=<hostname>
The host name of the Greenplum master host. If not provided, the value
of PGHOST or the local host is used.
-p <port> | --port=<port>
The Greenplum master port. If not provided, the value of PGPORT or the
port number provided at compile time is used.
-U <username> | --username=<username>
The database superuser account name, for example gpadmin. If not
provided, the value of PGUSER or the current OS user name is used.
-W (force password prompt)
Forces a password prompt. This will happen automatically if the server
requires password authentication.
--gp-c (use gunzip)
Use gunzip for inline decompression.
--gp-i (ignore errors)
Specifies that processing should ignore any errors that occur. Use this
option to continue restore processing on errors.
--gp-d=<directoryname>
Specifies the relative or absolute path to backup files on the hosts. If
this is a relative path, it is considered to be relative to the data
directory. If not specified, defaults to the data directory of each
instance being restored. Use this option if you created your backup
files in an alternate location when running gp_dump.
--gp-r=<reportfile>
Specifies the full path name where the restore job report file will be
placed on the master host. If not specified, defaults to the master data
directory.
--gp-l=<dbid> [, ...] (restore certain segments)
Specifies whether to check for backup files on only the specified active
segment instances (followed by a comma-separated list of the segments
dbid). The default is to check for backup files on all active segments,
restore the active segments, and then synchronize the mirrors.
-? | -h | --help (help)
Displays the online help.
--version (show utility version)
Displays the version of this utility.
*****************************************************
EXAMPLES
*****************************************************
Restore a Greenplum database using backup files created by gp_dump:
gp_restore --gp-k=2005103112453 -d gpdb
Restore a single segment instance only (by noting the dbid of the
segment instance):
gp_restore --gp-k=2005103112453 -d gpdb --gp-s=5
*****************************************************
SEE ALSO
*****************************************************
pg_restore, gpdbrestore
<?xml version="1.0"?>
<gpcheckosxml>
<osParm>
<sysctlConf>
<param>net.ipv4.ip_forward</param>
<value>0</value>
</sysctlConf>
<sysctlConf>
<param>net.ipv4.tcp_tw_recycle</param>
<value>1</value>
</sysctlConf>
<sysctlConf>
<param>kernel.sem</param>
<value>250 64000 100 512</value>
</sysctlConf>
<sysctlConf>
<param>kernel.shmall</param>
<value>4000000000</value>
</sysctlConf>
<sysctlConf>
<param>kernel.shmmni</param>
<value>4096</value>
</sysctlConf>
<sysctlConf>
<param>kernel.shmmax</param>
<value>500000000</value>
</sysctlConf>
<sysctlConf>
<param>kernel.msgmax</param>
<value>65536</value>
</sysctlConf>
<sysctlConf>
<param>kernel.msgmnb</param>
<value>65536</value>
</sysctlConf>
<sysctlConf>
<param>net.ipv4.tcp_syncookies</param>
<value>1</value>
</sysctlConf>
<sysctlConf>
<param>kernel.core_uses_pid</param>
<value>1</value>
</sysctlConf>
<sysctlConf>
<param>net.ipv4.conf.default.accept_source_route</param>
<value>0</value>
</sysctlConf>
<sysctlConf>
<param>net.ipv4.tcp_max_syn_backlog</param>
<value>1</value>
</sysctlConf>
<sysctlConf>
<param>net.core.netdev_max_backlog</param>
<value>10000</value>
</sysctlConf>
<sysctlConf>
<param>vm.overcommit_memory</param>
<value>2</value>
</sysctlConf>
<sysctlConf>
<param>kernel.sysrq</param>
<value>0</value>
</sysctlConf>
<limitsConf>
<limit>nofile</limit>
<softValue>* soft nofile 65536</softValue>
<hardValue>* hard nofile 65536</hardValue>
</limitsConf>
<limitsConf>
<limit>nproc</limit>
<softValue>* soft nproc 131072</softValue>
<hardValue>* hard nproc 131072</hardValue>
</limitsConf>
<blockDev>
<target>/dev/sd?</target>
<operation>setra</operation>
<opValue>16384</opValue>
</blockDev>
<grub>
<appendValue>elevator=deadline</appendValue>
</grub>
</osParm>
<refPlatform>
<Dell>
<model>PowerEdge R710</model>
</Dell>
<hp>
<model>ProLiant DL185</model>
<ctrlUtil>/usr/sbin/hpacucli</ctrlUtil>
</hp>
</refPlatform>
</gpcheckosxml>
COMMAND NAME: gpcheckos
THIS UTILITY IS DEPRECATED - USE gpcheck INSTEAD.
......@@ -18,7 +18,7 @@ gpcrondump -x database_name
[ -c [ --cleanup-date yyyymmdd | --cleanup-total n ] ]
[-z] [-r] [-f <free_space_percent>] [-b] [-h] [-H] [-j | -k]
[-g] [-G] [-C] [-d <master_data_directory>] [-B <parallel_processes>]
[-a] [-q] [-y <reportfile>] [-l <logfile_directory>]
[-a] [-q] [-l <logfile_directory>]
[--email-file <path_to_file> ] [-v]
{ [-E encoding] [--inserts | --column-inserts] [--oids]
[--no-owner | --use-set-session-authorization]
......@@ -833,17 +833,6 @@ OPTIONS
multiple databases.
-y <reportfile>
This option is deprecated and will be removed in a future release. If
specified, a warning message is returned stating that the -y option is
deprecated.
Specifies the full path name where a copy of the backup job log file is
placed on the master host. The job log file is created in the master
data directory or if running remotely, the current working directory.
-z (no compression)
Do not use compression. Default is to compress the dump files using
......
COMMAND NAME: gpdetective
Collects diagnostic information from a running Greenplum Database system.
The gpdetective utility is deprecated and will be removed in a future release.
*****************************************************
SYNOPSIS
*****************************************************
gpdetective [-h <hostname>] [-p <port>] [-U <username>] [-P <password>]
[--start_date <number_of_days> | <YYYY-MM-DD>]
[--end_date <YYYY-MM-DD>]
[--diagnostics a|n|s|o|c]
[--logs a|n|<dbid>[,<dbid>,... | -<dbid>]]
[--cores t|f]
[--pg_dumpall t|f] [--pg_dump_options <option>[,...]]
[--tempdir <temp_dir>]
[--connect t|f]
gpdetective -?
gpdetective -v
*****************************************************
DESCRIPTION
*****************************************************
The gpdetective utility collects information from a running Greenplum
Database system and creates a bzip2-compressed tar output file. This
output file can then be sent to Greenplum Customer Support to help with
the diagnosis of Greenplum Database errors or system failures. The
gpdetective utility runs the following diagnostic tests:
* gpstate to check the system status
* gpcheckos to verify the recommended OS settings on all hosts
* gpcheckcat and gpcheckdb to check the system catalog tables
for inconsistencies
gpdetective captures the following files and Greenplum system information:
* postgresql.conf configuration files
* log files (master and segments)
* Greenplum Database system configuration information
* (optional) Core files
* (optional) Schema DDL dumps for all databases and global objects
A bzip2-compressed tar output file containing this information is created
in the current directory with a file name of gpdetective<timestamp>.tar.bz2.
*****************************************************
OPTIONS
*****************************************************
--connect t|f
Specifies if gpdetective should connect to the database to obtain
system information. The default is true (t). If false (f),
gpdetective only gathers information it can obtain without making
a connection to the database. This information includes (from the
master host):
* Log files
* The <master_data_directory>/postgresql.conf file
* The ~/gpAdminLogs directory
* gpcheckos output
* Core files
--cores t|f
Determines whether or not the utility retrieves core files. The
default is true (t).
--diagnostics a|n|s|o|c
Specifies the diagnostic tests to run: all (a), none (n),
operating system (o) diagnostics, or catalog (c) diagnostics.
The default is all (a).
--end_date YYYY-MM-DD
Sets the end date for the diagnostic information collected. The
collected information ends at 00:00:00 of the specified date.
-h hostname
The host name of the machine on which the Greenplum master
database server is running. If not specified, reads from the
environment variable PGHOST or defaults to localhost.
--logs a|n|dbid_list
Specifies which log file(s) to retrieve: all (a), none (n), a
comma separated list of segment dbid numbers, or a range of dbid
numbers divided by a dash (-) (for example, 3-6 retrieves logs
from segments 3, 4, 5, and 6). The default is all (a).
-P password
If Greenplum Database is configured to use password authentication,
you must also supply the database superuser password. If not specified,
reads from ~/.pgpass if it exists.
--pg_dumpall t|f
Determines whether or not the utility runs pg_dumpall to collect
schema DDL for all databases and global objects. The default is true (t).
--pg_dump_options option[,...]
If --pg_dumpall is true, specifies a comma separated list of dump
options to use when the pg_dumpall utility is called. See pg_dumpall
for a valid list of dump options.
-p port
The TCP port on which the Greenplum master database server is listening
for connections. If not specified, reads from the environment variable
PGPORT or defaults to 5432.
--start_date number_of_days | YYYY-MM-DD
Sets the start date for the diagnostic information collected. Specify
either the number of days prior, or an explicit past date.
--tempdir temp_dir
Specifies the temporary directory used by gpdetective. The default
value is determined by the $TEMP, $TMP and $TMPDIR environment variables.
-U gp_superuser
The Greenplum database superuser role name to connect as (typically gpadmin). If not specified, reads from the environment variable PGUSER or defaults to the current system user name.
-v (show utility version)
Displays the version of this utility.
-? (help)
Displays the utility usage and syntax.
*****************************************************
EXAMPLES
*****************************************************
Collect all diagnostic information for a Greenplum Database system
and supply the required connection information for the master host:
gpdetective -h mdw -p 54320 -U gpadmin -P mypassword
Run diagnostics and collect all logs and system information for the
past two days:
gpdetective --start_date 2
Do not run diagnostic tests or schema dumps, just collect the log
files of the master and segment 3:
gpdetective --diagnostics n --logs -1,3 --pg_dumpall f
*****************************************************
SEE ALSO
*****************************************************
gpstate, gpcheckos, pg_dumpall
\ No newline at end of file
COMMAND NAME: gpsuspend
Pause and resume a running Greenplum Database
******************************************************
SYNOPSIS
******************************************************
gpsuspend --pause [--batchsize batchsize] [--noninteractive]
gpsuspend --resume --pausefile pausefile_name [--batchsize batchsize]
gpsuspend -? | -h | --help
Prerequisites:
* You are logged in as the Greenplum Database superuser (gpadmin).
* You are on the machine that is running the master database.
* You are not running --pause on an already paused database
*******************************************************
DESCRIPTION
*******************************************************
The gpsuspend utility can pause a running instance of Greenplum Database.
The utility is first run in 'pause' mode which will pause the database.
In 'pause' mode, the successful output of the command will print the
location of a generated pausefile which can be used to restore the
system state.
In 'resume' mode you must pass the location of the pause file which describes
the list of segment hosts in a Greenplum database and can be used to resume
the paused system.
By default the utility is run in interactive mode. In interactive mode
the utility will stop after pausing the database and wait for user entry.
At this point the database is paused. When the administrator is ready to
resume the database they can use the prompt to enter 'resume' and the database
will be resumed. To disable interactive mode and run 'pause' and 'resume'
independently, use the --noninteractive option with --pause.
The utility pauses the database using unix signals STOP and CONT. If you want
to confirm that the database is paused you can use gpssh and enter the command
ps ax | grep postgres | grep -v grep. This will list all postgres processes
on your cluster and the run state. All processes should be in a STOP state.
Also note, the order in which the processes are paused and resumed is important.
First the master postgres instance is paused and then the segments. Also within
a postgres instance, first the postmaster process is paused and then its children.
********************************************************
OPTIONS
********************************************************
-h (help)
Displays the online help.
--pause
Sets the utility into 'pause' mode
--resume
Sets the utility into 'resume' mode
--pausefile <pausefilename>
This option is used in 'resume' mode for the utility to know the
location of the segments while the database is paused and inaccessible.
The file is generated to the GPHOME directory during 'pause' mode.
--noninteractive
This option will disable the default interactive mode.
-B <batch_size>
The number of worker threads for connecting to segment hosts.
By making this number higher, more parallel ssh connections will be
made in order to complete the job faster.
--verbose | -v (verbose)
Verbose debugging output.
-? | -h (help)
Displays the online help.
*********************************************************
EXAMPLES
*********************************************************
Pause a running Greenplum database:
$ gpsuspend --pause --noninteractive
Resume a running Greenplum database using a pausefile:
$ gpsuspend --resume --pausefile /home/gpadmin/greenplum-db/./gp_pause.20091113.2158.dat
Running in interactive mode:
$ gpsuspend --pause
Database is paused. When you are ready, type a command below to resume or quit.
quit|resume (default=quit):
$ resume
--done--
**********************************************************
SEE ALSO
**********************************************************
gpstart, gpstop
......@@ -61,7 +61,7 @@ static bool testPartitioningSupport(void);
static bool transformPassThroughParms(InputOptions * pInputOpts);
static bool copyFilesToSegments(InputOptions * pInputOpts, SegmentDatabaseArray *segDBAr);
static int getRemoteVersion(void);
static bool no_expand_children; /* Do not expand child partitions. This option is passed from gpcrondump.py */
static bool no_expand_children; /* Do not expand child partitions. This option is passed from gpcrondump */
/*
* static and extern global variables left over from pg_dump
......
......@@ -883,7 +883,7 @@ main(int argc, char **argv)
#ifdef USE_DDBOOST
if (dd_boost_enabled)
{
/* The storage unit is created by the gpcrondump.py, before all the agents are executed */
/* The storage unit is created by the gpcrondump, before all the agents are executed */
/* Hence it is always false here */
/* remote is always false when doing backup to primary DDR */
int err = DD_ERR_NONE;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册