Commit 62125d58 authored by Marbin Tan, committed by Marbin Tan

Remove white spaces

Parent d55a374c
#!/usr/bin/env python
#
# Copyright (c) Greenplum Inc 2008. All Rights Reserved.
# Copyright (c) Greenplum Inc 2008. All Rights Reserved.
#
"""
TODO: module docs
......@@ -9,7 +9,7 @@ import sys
import os
import stat
try:
try:
from pygresql import pgdb
from gppylib.commands.unix import UserId
......@@ -28,22 +28,22 @@ class Pgpass():
"""
entries = []
valid_pgpass = True
def __init__(self):
HOME = os.getenv('HOME')
PGPASSFILE = os.getenv('PGPASSFILE', '%s/.pgpass' % HOME)
if not os.path.exists(PGPASSFILE):
return
st_info = os.stat(PGPASSFILE)
mode = str(oct(st_info[stat.ST_MODE] & 0777))
if mode != "0600":
print 'WARNING: password file "%s" has group or world access; permissions should be u=rw (0600) or less' % PGPASSFILE
self.valid_pgpass = False
return
try:
fp = open(PGPASSFILE, 'r')
try:
......@@ -61,7 +61,7 @@ class Pgpass():
'password': password }
self.entries.append(entry)
except:
print 'Invalid line in .pgpass file. Line number %d' % lineno
print 'Invalid line in .pgpass file. Line number %d' % lineno
lineno += 1
except IOError:
pass
......@@ -70,7 +70,7 @@ class Pgpass():
except OSError:
pass
def get_password(self, username, hostname, port, database):
for entry in self.entries:
if ((entry['hostname'] == hostname or entry['hostname'] == '*') and
......@@ -79,42 +79,42 @@ class Pgpass():
(entry['username'] == username or entry['username'] == '*')):
return entry['password']
return None
def pgpass_valid(self):
    """Report whether the .pgpass file passed the permission and parse checks."""
    is_valid = self.valid_pgpass
    return is_valid
class DbURL:
""" DbURL is used to store all of the data required to get at a PG
""" DbURL is used to store all of the data required to get at a PG
or GP database.
"""
pghost='foo'
pgport=5432
pgdb='template1'
pguser='username'
pgpass='pass'
pgpass='pass'
timeout=None
retries=None
retries=None
def __init__(self,hostname=None,port=0,dbname=None,username=None,password=None,timeout=None,retries=None):
if hostname is None:
self.pghost = os.environ.get('PGHOST', 'localhost')
else:
self.pghost = hostname
if port is 0:
self.pgport = int(os.environ.get('PGPORT', '5432'))
else:
self.pgport = int(port)
if dbname is None:
self.pgdb = os.environ.get('PGDATABASE', 'template1')
else:
self.pgdb = dbname
if username is None:
self.pguser = os.environ.get('PGUSER', os.environ.get('USER', UserId.local('Get uid')))
self.pguser = os.environ.get('PGUSER', os.environ.get('USER', UserId.local('Get uid')))
if self.pguser is None or self.pguser == '':
raise Exception('Both $PGUSER and $USER env variables are not set!')
else:
......@@ -130,7 +130,7 @@ class DbURL:
self.pgpass = os.environ.get('PGPASSWORD', None)
else:
self.pgpass = password
if timeout is not None:
self.timeout = int(timeout)
......@@ -148,7 +148,7 @@ class DbURL:
return '[' + s + ']'
return "%s:%d:%s:%s:%s" % \
(canonicalize(self.pghost),self.pgport,self.pgdb,self.pguser,self.pgpass)
(canonicalize(self.pghost),self.pgport,self.pgdb,self.pguser,self.pgpass)
def connect(dburl, utility=False, verbose=False,
......@@ -156,7 +156,7 @@ def connect(dburl, utility=False, verbose=False,
logConn=True):
if utility:
options = '-c gp_session_role=utility'
options = '-c gp_session_role=utility'
else:
options = ''
......@@ -169,7 +169,7 @@ def connect(dburl, utility=False, verbose=False,
# gpmigrator needs gpstart to make master connection in maintenance mode
if upgrade:
options += ' -c gp_maintenance_conn=true'
# bypass pgdb.connect() and instead call pgdb._connect_
# to avoid silly issues with : in ipv6 address names and the url string
#
......@@ -220,7 +220,7 @@ def connect(dburl, utility=False, verbose=False,
raise ConnectionError('Failed to connect to %s' % dbbase)
conn = pgdb.pgdbCnx(cnx)
#by default, libpq will print WARNINGS to stdout
if not verbose:
cursor=conn.cursor()
......@@ -233,18 +233,18 @@ def connect(dburl, utility=False, verbose=False,
cursor=conn.cursor()
cursor.execute("SET CLIENT_ENCODING='%s'" % encoding)
conn.commit()
cursor.close()
def __enter__(self):
cursor.close()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
conn.__class__.__enter__, conn.__class__.__exit__ = __enter__, __exit__
return conn
return conn
def execSQL(conn,sql):
"""
def execSQL(conn,sql):
"""
If necessary, user must invoke conn.commit().
Do *NOT* violate that API here without considering
the existing callers of this function.
......@@ -293,7 +293,7 @@ def execSQLForSingleton(conn, sql):
def executeUpdateOrInsert(conn, sql, expectedRowUpdatesOrInserts):
    """
    Execute an UPDATE/INSERT statement and verify its affected-row count.

    conn -- an open DB-API connection; the caller remains responsible for
            committing the transaction.
    sql  -- the UPDATE or INSERT statement to execute.
    expectedRowUpdatesOrInserts -- exact number of rows the statement is
            expected to affect.

    Raises Exception when cursor.rowcount differs from the expected count.
    NOTE(review): this view of the file appears cut off right after the
    raise; confirm against the full source whether the cursor is returned
    to the caller afterwards.
    """
    cursor=conn.cursor()
    cursor.execute(sql)
    # DB-API cursors report the number of rows affected via rowcount.
    if cursor.rowcount != expectedRowUpdatesOrInserts :
        raise Exception("SQL affected %s rows but %s were expected:\n%s" % \
            (cursor.rowcount, expectedRowUpdatesOrInserts, sql))
......
......@@ -4,7 +4,7 @@
"""
gp_dbid.py
Copyright (c) EMC/Greenplum Inc 2011. All Rights Reserved.
Copyright (c) EMC/Greenplum Inc 2011. All Rights Reserved.
"""
import re
......@@ -53,8 +53,8 @@ class GpDbidFile:
def parse(self, f):
"""
Parse f, looking for matching dbid and standby_dbid expressions and
ignoring all other lines. Assigns dbid and/or standby_dbid to observed
values, converting matched values from strings to integers.
ignoring all other lines. Assigns dbid and/or standby_dbid to observed
values, converting matched values from strings to integers.
"""
INFO = self.logger.info
DEBUG = self.logger.debug
......@@ -93,7 +93,7 @@ class GpDbidFile:
f.write('standby_dbid = %d\n' % self.standby_dbid)
INFO('Wrote standby_dbid: %d to the file.' % self.standby_dbid)
def write_gp_dbid(self):
"""
Create or replace gp_dbid file with current values, changing
......@@ -152,7 +152,7 @@ if __name__ == '__main__':
d2.read_gp_dbid()
assert d.dbid == d2.dbid
assert d.standby_dbid == d2.standby_dbid
if os.path.exists(TESTDIR): shutil.rmtree(TESTDIR)
os.mkdir(TESTDIR)
unittest.main()
......
This diff has been collapsed.
#!/usr/bin/env python
#
# Copyright (c) Greenplum Inc 2008. All Rights Reserved.
# Copyright (c) Greenplum Inc 2008. All Rights Reserved.
#
#
# THIS IMPORT MUST COME FIRST
......@@ -15,7 +15,7 @@ from optparse import Option, OptionGroup, OptionParser, OptionValueError, SUPPRE
try:
import pickle
from gppylib.db import dbconn
from gppylib.gpparseopts import OptParser, OptChecker
from gppylib.gparray import *
......@@ -74,7 +74,7 @@ class GpStart:
self.wrapper_args=wrapper_args;
self.skip_standby_check=skip_standby_check;
self.logfileDirectory=logfileDirectory
#
# Some variables that are set during execution
#
......@@ -153,7 +153,7 @@ class GpStart:
cmd.run()
logger.debug("results of forcing master shutdown: %s" % cmd)
#TODO: check results of command.
# in order to fail out here we must have filespace configured and also
# have failed the consistency check
if filespace_configured and inconsistent_filespace:
......@@ -227,22 +227,22 @@ class GpStart:
def _check_version(self):
    """
    Check the local Greenplum binary version and verify that the binary's
    catalog version matches the catalog version recorded in the master
    data directory.

    Raises ExceptionNoStackTraceNeeded on a catalog version mismatch.
    The check is skipped in 'upgrade' mode because gpmigrator has already
    performed it.
    """
    self.gpversion=gp.GpVersion.local('local GP software version check',self.gphome)
    logger.info("Greenplum Binary Version: '%s'" % self.gpversion)

    # It would be nice to work out the catalog version => greenplum version
    # calculation so that we can print out nicer error messages when the
    # version doesn't match.
    bin_catversion = gp.GpCatVersion.local('local GP software catalag version check', self.gphome)
    logger.info("Greenplum Catalog Version: '%s'" % bin_catversion)

    dir_catversion = gp.GpCatVersionDirectory.local('local GP directory catalog version check', self.master_datadir)

    # In upgrade mode the catalog version check is skipped because
    # gpmigrator has already verified it.
    if (self.specialMode != 'upgrade' and bin_catversion != dir_catversion):
        logger.info("MASTER_DIRECTORY Catalog Version: '%s'" % dir_catversion)
        logger.info("Catalog Version of master directory incompatible with binaries")
        raise ExceptionNoStackTraceNeeded("Catalog Versions are incompatible")
######
def _check_master_running(self):
......@@ -276,7 +276,7 @@ class GpStart:
if primary_tli < standby_tli:
# stop the master we've started up.
cmd=gp.GpStop("Shutting down master", masterOnly=True,
cmd=gp.GpStop("Shutting down master", masterOnly=True,
fast=True, quiet=logging_is_quiet(),
verbose=logging_is_verbose(),
datadir=self.master_datadir)
......@@ -395,7 +395,7 @@ class GpStart:
cmd=gp.MasterStart('master in utility mode', self.master_datadir,
self.port, d.dbid, d.standby_dbid or 0,
numContentsInCluster, self.era,
wrapper=self.wrapper, wrapper_args=self.wrapper_args,
wrapper=self.wrapper, wrapper_args=self.wrapper_args,
specialMode=self.specialMode, timeout=self.timeout, utilityMode=True
);
cmd.run()
......@@ -424,7 +424,7 @@ class GpStart:
######
def _start(self, segmentsToStart, invalidSegments, inactiveSegments):
""" starts all of the segments, the master and the standby master
returns whether all segments that should be started were started successfully
note that the parameters do not list master/standby, they only list data segments
......@@ -448,7 +448,7 @@ class GpStart:
localeData = ":".join([self.lc_collate,self.lc_monetary,self.lc_numeric])
# this will eventually start gpsegstart.py
segmentStartOp = StartSegmentsOperation(self.pool,self.quiet, localeData, self.gpversion,
self.gphome, self.master_datadir, self.timeout,
self.gphome, self.master_datadir, self.timeout,
self.specialMode, self.wrapper, self.wrapper_args,
logfileDirectory=self.logfileDirectory)
segmentStartResult = segmentStartOp.startSegments(self.gparray, segmentsToStart, startMode, self.era)
......@@ -520,7 +520,7 @@ class GpStart:
toStart = [seg for seg in segs if dbIdsToNotStart.get(seg.getSegmentDbId()) is None]
return (toStart, invalid_segs, inactive_mirrors)
####
####
def _verify_enough_segments(self,startResult,gparray):
successfulSegments = startResult.getSuccessfulSegments()
mirroringFailures = [f.getSegment() for f in startResult.getFailedSegmentObjs() \
......@@ -572,7 +572,7 @@ class GpStart:
return False
return True
######
######
def _shutdown_segments(self,segmentStartResult):
logger.info("Commencing parallel segment instance shutdown, please wait...")
......@@ -631,7 +631,7 @@ class GpStart:
started = len(segmentStartResult.getSuccessfulSegments())
failedFromMirroring = len(mirroringFailures)
failedNotFromMirroring = len(nonMirroringFailures)
totalTriedToStart = started + failedFromMirroring + failedNotFromMirroring
totalTriedToStart = started + failedFromMirroring + failedNotFromMirroring
if failedFromMirroring > 0 or failedNotFromMirroring > 0 or logging_is_verbose():
logger.info("----------------------------------------------------")
......@@ -720,9 +720,9 @@ class GpStart:
######
def _start_final_master(self):
''' Last item in the startup sequence is to start the master.
''' Last item in the startup sequence is to start the master.
After starting the master we connect to it. This is done both as a check that the system is
After starting the master we connect to it. This is done both as a check that the system is
actually started but its also done because certain backend processes don't get kickstarted
until the first connection. The DTM is an example of this and it allows those initialization
messages to end up in the gpstart log as opposed to the user's psql session.
......@@ -749,7 +749,7 @@ class GpStart:
self.master_datadir, self.port, self.gparray.master.dbid, standby_dbid,
numContentsInCluster, self.era,
wrapper=self.wrapper, wrapper_args=self.wrapper_args,
specialMode=self.specialMode, restrictedMode=self.restricted, timeout=self.timeout,
specialMode=self.specialMode, restrictedMode=self.restricted, timeout=self.timeout,
max_connections=self.max_connections, disableMasterMirror=(not self.start_standby)
)
......@@ -793,8 +793,8 @@ class GpStart:
######
def _start_standby(self):
''' used to start the standbymaster if necessary.
''' used to start the standbymaster if necessary.
returns if the standby master was started or not
'''
if self.start_standby and self.gparray.standbyMaster is not None:
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register