Commit a2c772c5 authored by Heikki Linnakangas, committed by Xin Zhang

Remove obsolete PT rebuild TINC tests.

Parent 6ef6935d
@@ -134,14 +134,6 @@ crash_recovery_schema_topology:
-p test_crash_recovery_schema_topology.py \
-p test_reindex_pg_class.py
# Crash recovery test during ptrebuild
persistent_table_rebuild:
$(TESTER) $(DISCOVER) \
-s tincrepo/mpp/gpdb/tests/storage/persistent_tables \
-p test_PT_RebuildPT.py
runaway_query:
$(TESTER) \
resource_management.runaway_query.runaway_query_scenario.test_runaway_query_scenario.RQTScenarioTestCase \
-- Rebuild Persistent Object on Master --
select * from gp_persistent_reset_all();
select * from gp_persistent_build_all(false);
-- Rebuild Persistent Object on Segment --
select * from gp_persistent_reset_all();
select * from gp_persistent_build_all(true);
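For reference, both variants reset the persistent catalog and rebuild it, differing only in the boolean passed to gp_persistent_build_all(). A minimal sketch of how such a file is executed in utility mode with the TINC PSQL wrapper (the persistent_Rebuild helper below does exactly this, adding a timestamped out_file; the hostname and port are illustrative placeholders):

from mpp.lib.PSQL import PSQL

# Run the segment variant of the rebuild SQL against one primary segment,
# connecting in utility mode via PGOPTIONS.
PSQL.run_sql_file(sql_file='persistent_Rebuild_Segment.sql',
                  PGOPTIONS='-c gp_session_role=utility',
                  host='sdw1', port='40000',
                  out_file='persistent_Rebuild_Segment.out')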
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import tinctest
from persistent_rebuild_utility import PTRebuildUtil
from mpp.gpdb.tests.storage.persistent_tables.fault.genFault import Fault
from tinctest import TINCTestCase
''' Global persistent table rebuild - Test Enhancement ParisTX-PT '''
class RebuildPersistentObjectsTest(TINCTestCase):
def test_rebuild_persistent_objects_segment(self):
ptutil = PTRebuildUtil()
if ptutil.check_dbconnections():
tinctest.logger.info('There are active database connections; cannot rebuild PT')
return
(hostname, port) = ptutil.get_hostname_port_of_segment()
tinctest.logger.info('Rebuilding Global Persistent Object on segment %s : %s'%(hostname, port))
ptutil.persistent_Rebuild(hostname, port, 'Segment')
def test_rebuild_persistent_objects_master(self):
ptutil = PTRebuildUtil()
if ptutil.check_dbconnections():
tinctest.logger.info('There are active database connections; cannot rebuild PT')
return
tinctest.logger.info('Rebuilding Global Persistent Object on Master')
ptutil.persistent_Rebuild()
class AbortRebuildPersistentObjectsTest(TINCTestCase):
@classmethod
def setUpClass(cls):
tinctest.logger.info('Abort Rebuilding Global Persistent Object process...')
def test_rebuild_persistent_objects(self):
ptutil = PTRebuildUtil()
if ptutil.check_dbconnections():
tinctest.logger.info('There are active database connections; cannot rebuild PT')
return
(hostname, port) = ptutil.get_hostname_port_of_segment()
tinctest.logger.info('Rebuilding Global Persistent Object on segment %s : %s'%(hostname, port))
ptutil.persistent_Rebuild(hostname, port, 'Segment')
def test_stop_db(self):
newfault = Fault()
newfault.stop_db('i')
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import tinctest
import os
import datetime
from time import sleep
from random import randint
from tinctest.lib import local_path
from mpp.lib.PSQL import PSQL
from gppylib.commands.base import Command
GPDBNAME = str(os.environ["PGDATABASE"])
''' Utilities for the global persistent table rebuild test '''
class PTRebuildUtil(Command):
def __init__(self, cmd = None):
Command.__init__(self, 'Running fault command', cmd)
def _run_sys_cmd(self, cmd_str, validate = False):
'''Helper to run a system command'''
tinctest.logger.info("execute: " + cmd_str)
cmd = PTRebuildUtil(cmd_str)
cmd.run(validateAfter = validate)
return cmd.get_results()
def check_dbconnections(self):
''' Check if database has any active connections '''
sql_cmd = 'select count(*) FROM pg_stat_activity;'
# Subtract one to exclude this session's own connection
conCount = int(PSQL.run_sql_command(sql_cmd).split('\n')[3]) - 1
if conCount > 0:
print "There are %s active connections on the database" % conCount
return True
else :
return False
def get_hostname_port_of_segment(self):
''' Get the hostname and port of a primary segment '''
# Get primary segments
cmd_str = "select hostname, port from gp_segment_configuration where role = 'p' and content != '-1';"
seglist = PSQL.run_sql_command(cmd_str).split('\n')
# Use a fixed segment index instead of randint(1, 2) so that a second
# retry rebuilds the same segment
segNo = 2 + 1
(hostname, port) = seglist[segNo].split('|')
return (hostname, port)
def persistent_Rebuild(self, hostname = None, port = None, type = 'Master'):
''' Rebuild Persistent Object by connecting in Utility mode '''
sql_file = local_path('persistent_Rebuild_%s.sql'%type)
now = datetime.datetime.now()
timestamp = '%s%s%s%s%s%s%s'%(now.year,now.month,now.day,now.hour,now.minute,now.second,now.microsecond)
out_file = sql_file.replace('.sql', timestamp + '.out')
PSQL.run_sql_file(sql_file = sql_file, PGOPTIONS = '-c gp_session_role=utility', host = hostname, port = port, out_file = out_file)
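Taken together, the test cases above drive this utility as follows; a condensed sketch, assuming a running cluster and the TINC environment:

from persistent_rebuild_utility import PTRebuildUtil

ptutil = PTRebuildUtil()
if not ptutil.check_dbconnections():
    # With no host/port, the rebuild SQL runs against the master...
    ptutil.persistent_Rebuild()
    # ...and with a segment's address, it runs there in utility mode.
    (hostname, port) = ptutil.get_hostname_port_of_segment()
    ptutil.persistent_Rebuild(hostname, port, 'Segment')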
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from tinctest.main import TINCException
class ClusterStateException(TINCException): pass
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import tinctest
from time import sleep
from mpp.gpdb.tests.storage.persistent_tables.fault.genFault import Fault
from mpp.gpdb.tests.storage.lib.common_utils import checkDBUp
from mpp.lib.gprecoverseg import GpRecover
from tinctest import TINCTestCase
'''
System recovery
'''
class RecoveryTest(TINCTestCase):
def check_db(self):
checkDBUp()
def test_recovery(self):
gprecover = GpRecover()
gprecover.incremental()
gprecover.wait_till_insync_transition()
def test_recovery_full(self):
gprecover = GpRecover()
gprecover.full()
gprecover.wait_till_insync_transition()
def test_rebalance_segment(self):
newfault = Fault()
self.assertTrue(newfault.rebalance_cluster(),"Segments not rebalanced!!")
def test_recovery_abort(self):
newfault = Fault()
sleep(100)
newfault._run_sys_cmd('gprecoverseg -a &')
newfault.stop_db('i')
def test_recovery_full_abort(self):
newfault = Fault()
sleep(100)
newfault._run_sys_cmd('gprecoverseg -aF &')
newfault.stop_db('i')
'''
DB Operations
'''
class GPDBdbOps(TINCTestCase):
def gpstop_db(self):
''' Stop the database in normal mode '''
newfault = Fault()
sleep(5)
newfault.stop_db()
def gpstop_db_immediate(self):
''' Stop the database in immediate mode '''
newfault = Fault()
sleep(5)
newfault.stop_db('i')
def gpstart_db(self):
''' Start the database in normal mode '''
sleep(5)
newfault = Fault()
newfault.start_db()
def gprestart_db(self):
''' Restarts the Database '''
sleep(5)
newfault = Fault()
newfault.restart_db()
def gpstart_db_restricted(self):
''' Start the database in restricted mode '''
sleep(5)
newfault = Fault()
newfault.start_db('R')
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import tinctest
import os
import random
from time import sleep
from random import randint
from tinctest.lib import local_path
from mpp.lib.PSQL import PSQL
from gppylib.commands.base import Command
from subprocess import Popen, PIPE
from mpp.lib.config import GPDBConfig
GPDBNAME = str(os.environ["PGDATABASE"])
'''
Creates Faults for the scenario
'''
class Fault(Command):
def __init__(self, cmd = None):
Command.__init__(self, 'Running fault command', cmd)
def _run_sys_cmd(self, cmd_str, validate = False):
'''Helper to run a system command'''
tinctest.logger.info("execute: " + cmd_str)
cmd = Fault(cmd_str)
cmd.run(validateAfter = validate)
return cmd.get_results()
def stop_db(self,options = None):
''' Stops the greenplum DB based on the options provided '''
cmd_str = "source $GPHOME/greenplum_path.sh; gpstop -a"
if options is None:
options = ''
cmd_str = cmd_str + options
tinctest.logger.info("Starting the db operation: %s "%cmd_str)
result = self._run_sys_cmd(cmd_str)
return result
def start_db(self,options = None):
''' Start the greenplum DB based on the options provided '''
cmd_str = "source $GPHOME/greenplum_path.sh; gpstart -a"
if options is None:
options = ''
cmd_str = cmd_str + options
tinctest.logger.info("Starting the db operation: %s "%cmd_str)
result = self._run_sys_cmd(cmd_str)
return result
def restart_db(self,options = 'ir'):
''' Restart the greenplum DB; gpstop -r performs a restart, so this delegates to stop_db '''
return self.stop_db(options)
def run_recovery(self,options = None):
'''Runs the incremental recovery'''
tinctest.logger.info('Invoking gprecoverseg to bring the segments up')
if options is None:
options = ''
cmd_str = "source $GPHOME/greenplum_path.sh; gprecoverseg -a" + options
result = self._run_sys_cmd(cmd_str, False)
return result.stdout
def drop_db(self, dbname = GPDBNAME):
''' Drop database '''
tinctest.logger.info('Drop database ' + dbname)
cmd = 'dropdb '+ dbname
result = self._run_sys_cmd(cmd)
tinctest.logger.info(result.stderr)
def create_db(self, dbname = GPDBNAME):
''' Create Database '''
tinctest.logger.info('Create database ' + dbname)
cmd = 'createdb '+ dbname
result = self._run_sys_cmd(cmd)
tinctest.logger.info(result.stderr)
def rebalance_cluster(self):
config = GPDBConfig()
self.run_recovery('r')
rtrycnt = 0
# is_not_insync_segments() returns True once no segment is out of sync
while not config.is_not_insync_segments() and rtrycnt <= 5:
tinctest.logger.info("Waiting [%s] for DB to recover" % rtrycnt)
sleep(10)
rtrycnt = rtrycnt + 1
# gprecoverseg -ar has often been observed to mark segments down; if the
# cluster is back in sync we are done, otherwise fall back to an
# incremental recovery and wait again
if config.is_not_insync_segments():
return True
else:
self.run_recovery()
rtrycnt = 0
max_rtrycnt = 10
while not config.is_not_insync_segments() and rtrycnt < max_rtrycnt:
tinctest.logger.info("Waiting [%s] for DB to recover" % rtrycnt)
sleep(10)
rtrycnt = rtrycnt + 1
if rtrycnt < max_rtrycnt:
return True
else:
tinctest.logger.error("Segments not up after incremental recovery!!")
return False
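A sketch of how the tests in this module drive Fault; the single-letter options are appended directly after the gpstop/gpstart "-a" switch, so stop_db('i') runs "gpstop -ai":

from mpp.gpdb.tests.storage.persistent_tables.fault.genFault import Fault

fault = Fault()
fault.stop_db('i')         # immediate shutdown: gpstop -ai
fault.start_db('R')        # restricted start: gpstart -aR
fault.run_recovery()       # incremental recovery: gprecoverseg -a
fault.rebalance_cluster()  # gprecoverseg -ar, with incremental fallback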
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import fnmatch
import tinctest
from tinctest.lib import local_path
from mpp.gpdb.tests.storage.persistent_tables.sqls.generate_sqls import GenerateSqls
from mpp.lib.PSQL import PSQL
'''
Creates and runs the prerequisite SQL files before the actual load starts
'''
class InitialSetup():
def createSQLFiles(self):
tinctest.logger.info('Creating the SQL files under setup folder')
schema = GenerateSqls()
table_types = ('ao', 'co', 'heap')
for table_type in table_types:
schema.create_table_setup('table',table_type,table_type)
schema.create_table_setup('insert_tb',table_type,table_type)
schema.create_table_setup('insert_tb',table_type + '_part',table_type,'yes')
schema.create_table_setup('drop_tb',table_type,table_type)
def runSQLFiles(self):
tinctest.logger.info('Running SQL files under the setup folder')
for file in os.listdir(local_path('setup')):
if fnmatch.fnmatch(file,'*_pre.sql'):
PSQL.run_sql_file(local_path('setup/' + file))
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import inspect
import os
import random
import sys
import tinctest
from mpp.lib.PSQL import PSQL
class GenerateSqls():
def local_path(self, filename):
"""Return the absolute path of the input file.:Overriding it here to use the absolute path instead of relative"""
frame = inspect.stack()[1]
source_file = inspect.getsourcefile(frame[0])
source_dir = os.path.dirname(os.path.abspath(source_file))
return os.path.join(source_dir, filename)
def writeConcurrentcyIteration(self, sql_file, concurrency=None, iterations=None):
''' Write the TINC @concurrency/@iterations metadata to an open SQL file; both default to a random value in [5, 10] '''
if iterations is None:
iterations = random.randint(5,10)
if concurrency is None:
concurrency = random.randint(5,10)
sql_file.write("-- @concurrency %s \n" % concurrency)
sql_file.write("-- @iterations %s \n" % iterations)
def column_generation(self, col_count=10):
table_def = "c1 int, c2 char(10), c3 date, c4 text, c5 timestamp, c6 numeric, c7 varchar, c8 int, c9 text, c10 date"
if col_count == 100:
# Bug fix: the original used '==' (a no-op comparison) instead of '=';
# repeating the same names would also create duplicate columns, so
# extend with uniquely numbered columns c11..c100 instead.
types = ('int', 'char(10)', 'date', 'text', 'timestamp', 'numeric', 'varchar', 'int', 'text', 'date')
for i in range(10, col_count):
table_def = table_def + ', c%s %s' % (i + 1, types[i % 10])
return table_def
def data_generation(self, col_count=10):
insert_row = "generate_series(1,6), 'pt_test', '2013-01-01', 'persistent tables testing', '2002-11-13 03:51:15+1359', generate_series(20,24), 'persistent tables' , 2013, 'again same text pt_testing', '2013-03-25'"
if col_count == 100 :
# Bug fix: the original used '==' (a no-op comparison) instead of '=';
# append the base 10-value row nine more times to reach 100 values.
base_row = insert_row
for i in range(1, 10):
insert_row = insert_row + ', ' + base_row
return insert_row
def create_tb_string(self, tablename, storage_type, compression='no', partition='no'):
storage_orientation = {'ao':'row', 'co': 'column'}
if storage_type == 'co':
col_count = 100
else:
col_count = 10
table_definition = self.column_generation(col_count)
create_table_str = "Create table " + tablename + "(" + table_definition + ")"
part_string = " Partition by range(c1) \n Subpartition by range(c6) Subpartition Template (default subpartition subothers, start(20) end(24) every(1)) \n (default partition others, start (1) end (6) every (1)) "
compression_list = ['quicklz', 'zlib']
if storage_type == "heap":
create_table_str = create_table_str
elif storage_type == "co" or storage_type == "ao" :
create_table_str = create_table_str + " WITH(appendonly = true, orientation = " + storage_orientation[storage_type] + ") "
if compression == "yes" :
create_table_str = create_table_str[:-2] + ", compresstype = " + random.choice(compression_list) + ") "
if partition == "yes" :
create_table_str = create_table_str + part_string
create_table_str = create_table_str + ";\n"
return create_table_str
def create_tables(self, table_type, storage_type, compression='no', partition='no'):
''' Generate the DDLs for different types of tables '''
if storage_type == 'co':
col_count = 100
else:
col_count = 10
# Let's have three files with random statements written to them.
# First write concurrency and iteration to all the files
filename = self.local_path('%s_1.sql' % table_type)
file1 = open(filename , "w")
self.writeConcurrentcyIteration(file1)
filename = self.local_path('%s_1.ans' % table_type)
file1_ans = open(filename, "w")
filename = self.local_path('%s_2.sql' % table_type)
file2 = open(filename , "w")
self.writeConcurrentcyIteration(file2)
filename = self.local_path('%s_2.ans' % table_type)
file2_ans = open(filename, "w")
filename = self.local_path('%s_3.sql' % table_type)
file3 = open(filename , "w")
self.writeConcurrentcyIteration(file3)
filename = self.local_path('%s_3.ans' % table_type)
file3_ans = open(filename, "w")
for t in range(1, 201):
tablename = '%s_table_%s' % (table_type, t)
create_table_str = self.create_tb_string(tablename, storage_type, compression, partition)
sql_file = random.choice([file1, file2, file3])
if t % 10 == 0:
sql_file.write('Begin; \n')
sql_file.write('%s \n' % create_table_str)
# Index creation is commented out due to a deadlock issue; see MPP-19781
# if t % 5 == 0:
# create_index_str = "Create index " + tablename + "_idx_" + str(t) + " on " + tablename + " (c1);\n"
# sql_file.write('%s \n' % create_index_str)
insert_str = 'Insert into %s values ( %s ); \n ' % (tablename, self.data_generation(col_count))
sql_file.write('%s \n' % insert_str)
sql_file.write("Drop table if exists %s; \n" % tablename)
if t % 20 == 0 :
sql_file.write('Commit;\n')
elif t % 10 == 0 and t % 20 != 0 :
sql_file.write('Abort; \n')
def create_table_setup(self, table_prefix, table_type, storage_type, partition='no'):
''' Creates a few tables as a prerequisite for the actual load '''
filename = self.local_path('setup/%s_%s_pre.sql' % (table_type, table_prefix))
sql_file = open(filename , "w")
self.writeConcurrentcyIteration(sql_file, concurrency=1, iterations=1)
for t in range(1, 201):
tablename = '%s_%s_%s' % (table_type, table_prefix, t)
create_table_str = self.create_tb_string(tablename, storage_type, partition=partition) # honor the partition flag passed in by createSQLFiles
sql_file.write('Drop table if exists %s; \n' % tablename)
sql_file.write('%s \n' % create_table_str)
def insert_rows(self, table_type, storage_type, partition='no'):
#self.create_table_setup('insert_tb', table_type, storage_type, partition)
filename = self.local_path('%s_insert_data.sql' % table_type)
sql_file = open(filename , "w")
#self.writeConcurrentcyIteration(sql_file, concurrency=100)
self.writeConcurrentcyIteration(sql_file)
filename = self.local_path('%s_insert_data.ans' % table_type)
sql_file_ans = open(filename , "w")
if storage_type == 'co':
col_count = 100
else:
col_count = 10
for t in range(1, 201):
tablename = '%s_insert_tb_%s' % (table_type, t)
if t % 3 == 0:
insert_str = 'Begin; \n Insert into %s values ( %s ); \nCommit; \n ' % (tablename, self.data_generation(col_count))
sql_file.write('%s \n' % insert_str)
else:
insert_str = 'Begin; \n Insert into %s values ( %s ); \nAbort; \n ' % (tablename, self.data_generation(col_count))
sql_file.write('%s \n' % insert_str)
def drop_recreate_table(self, table_type, storage_type):
#self.create_table_setup('drop_tb', table_type, storage_type)
filename = self.local_path('%s_drop_tb.sql' % table_type)
sql_file = open(filename , "w")
self.writeConcurrentcyIteration(sql_file)
filename = self.local_path('%s_drop_tb.ans' % table_type)
sql_file_ans = open(filename , "w")
for t in range(1, 201):
tablename = '%s_drop_tb_%s' % (table_type, random.randint(1, 200))
drop_str = 'Drop table %s; \nCreate table %s (i int, t text); \n' % (tablename, tablename)
if t % 5 == 0 and t % 10 != 0:
sql_file.write('Begin; \n %sCommit; \n' % drop_str)
elif t % 5 == 0 :
sql_file.write('Begin; \n %sAbort; \n ' % drop_str)
else :
sql_file.write('%s \n' % drop_str)
def generate_sqls(self):
for storage_type in ('ao', 'co', 'heap'):
self.create_tables(storage_type, storage_type)
self.create_tables(storage_type + '_part', storage_type, 'no', 'yes')
if storage_type != 'heap' :
self.create_tables(storage_type + "_compr", storage_type, "yes")
self.insert_rows(storage_type, storage_type)
self.insert_rows(storage_type + '_part', storage_type, 'yes')
self.drop_recreate_table(storage_type, storage_type)
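Generating the whole workload is a single call; a short sketch (generate_sqls() writes the *_1.sql/*_2.sql/*_3.sql, *_insert_data.sql and *_drop_tb.sql files next to this module; the table name co_demo_1 is illustrative):

from mpp.gpdb.tests.storage.persistent_tables.sqls.generate_sqls import GenerateSqls

gen = GenerateSqls()
gen.generate_sqls()
# A single DDL can also be previewed directly, e.g. a compressed,
# partitioned, column-oriented table:
print gen.create_tb_string('co_demo_1', 'co', compression='yes', partition='yes')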
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import tinctest
from mpp.gpdb.tests.storage.persistent_tables.sqls.InitialSetup import InitialSetup
from tinctest.models.scenario import ScenarioTestCase
from mpp.gpdb.tests.storage.persistent_tables.fault.genFault import Fault
from mpp.gpdb.tests.storage.persistent_tables import ClusterStateException
from mpp.lib.gprecoverseg import GpRecover
''' Persistent Tables test :: Rebuild persistent tables '''
class PersistentTables(ScenarioTestCase):
"""
@description Test cases for persistent table testing QA-2417 - Crash Recovery Test
@created 2013-03-29 10:10:10
@modified 2013-05-24 17:10:15
@tags persistent tables schedule_persistent_tables
@product_version gpdb:
"""
def __init__(self, methodName):
super(PersistentTables, self).__init__(methodName)
@classmethod
def setUpClass(cls):
super(PersistentTables,cls).setUpClass()
tinctest.logger.info('Setup Database ...')
setdb = Fault()
setdb.create_db()
tinctest.logger.info('Running the initial setup sql files')
setup = InitialSetup()
setup.createSQLFiles()
setup.runSQLFiles()
# Replacing the setUp method with the following one, as setUp method is called twice redundantly
def setUp(self):
''' Need to rebalance the cluster, as primary segments are killed during the test '''
super(PersistentTables,self).setUp()
tinctest.logger.info('***Rebalancing cluster state***')
fault_recovery = Fault()
if not fault_recovery.rebalance_cluster():
raise ClusterStateException("**FATAL!! Cluster rebalancing failed - segments went down after \
gprecoverseg -ar, even incremental recovery couldn't bring the segments up. \
Cannot proceed with the tests!! ")
''' Global persistent table rebuild - Test Enhancement ParisTX-PT '''
def rebuild_persistent_table_objects(self, type = 'master'):
''' Rebuild Persistent Object '''
test_case_list1 = []
test_case_list1.append('mpp.gpdb.tests.storage.persistent_tables.PTRebuild.persistent_rebuild_scenario.RebuildPersistentObjectsTest.test_rebuild_persistent_objects_%s' %type)
self.test_case_scenario.append(test_case_list1)
# Check the state of the DB and cluster
test_case_list2 = []
test_case_list2.append("mpp.gpdb.tests.storage.lib.dbstate.DbStateClass.check_catalog")
self.test_case_scenario.append(test_case_list2)
test_case_list3 = []
test_case_list3.append("mpp.gpdb.tests.storage.persistent_tables.test_PT_RebuildPT.PersistentTables.wait_till_insync_transition")
self.test_case_scenario.append(test_case_list3)
test_case_list4 = []
test_case_list4.append("mpp.gpdb.tests.storage.lib.dbstate.DbStateClass.check_mirrorintegrity")
self.test_case_scenario.append(test_case_list4)
def wait_till_insync_transition(self):
self.gpr = GpRecover()
self.gpr.wait_till_insync_transition()
def test_rebuild_persistent_objects_master(self):
''' Rebuild Persistent Object on Master '''
self.rebuild_persistent_table_objects('master')
def test_rebuild_persistent_objects_segment(self):
''' Rebuild Persistent Object on Segment '''
self.rebuild_persistent_table_objects('segment')
# Regarding "test_abort_pt_rebuild": this sets up a race between gpstop and a
# `select * from gp_persistent_reset_all(); select * from gp_persistent_build_all(true);`.
# The Storage team indicated that this scenario seems unnecessary (users do not
# generally use PT rebuild). In other words, the test may produce a repeatable
# red, but it isn't currently judged worthy of inclusion.
def _unused_test_abort_pt_rebuild(self):
''' Abort Persistent Object Rebuild '''
test_case_list1 = []
test_case_list1.append('mpp.gpdb.tests.storage.persistent_tables.PTRebuild.persistent_rebuild_scenario.AbortRebuildPersistentObjectsTest.test_stop_db')
test_case_list1.append('mpp.gpdb.tests.storage.persistent_tables.PTRebuild.persistent_rebuild_scenario.AbortRebuildPersistentObjectsTest.test_rebuild_persistent_objects')
self.test_case_scenario.append(test_case_list1)
#Start Database
test_case_list2 = []
test_case_list2.append('mpp.gpdb.tests.storage.persistent_tables.fault.fault.GPDBdbOps.gpstart_db')
self.test_case_scenario.append(test_case_list2)
#Issue gpcheckcat
test_case_list3 = []
test_case_list3.append('mpp.gpdb.tests.storage.lib.dbstate.DbStateClass.check_catalog')
self.test_case_scenario.append(test_case_list3)
#Do recovery
#gprecoverseg Incr
test_case_list4 = []
test_case_list4.append('mpp.gpdb.tests.storage.persistent_tables.fault.fault.RecoveryTest.test_recovery')
self.test_case_scenario.append(test_case_list4)
#Re-balance segments to rebuild PT
test_case_list5 = []
test_case_list5.append('mpp.gpdb.tests.storage.persistent_tables.fault.fault.RecoveryTest.test_rebalance_segment')
self.test_case_scenario.append(test_case_list5)
#Do PT rebuild
test_case_list6 = []
test_case_list6.append('mpp.gpdb.tests.storage.persistent_tables.PTRebuild.persistent_rebuild_scenario.RebuildPersistentObjectsTest.test_rebuild_persistent_objects_segment')
self.test_case_scenario.append(test_case_list6)
# Check the state of the DB and cluster
test_case_list7 = []
test_case_list7.append("mpp.gpdb.tests.storage.lib.dbstate.DbStateClass.check_catalog")
self.test_case_scenario.append(test_case_list7)
test_case_list8 = []
test_case_list8.append("mpp.gpdb.tests.storage.persistent_tables.test_PT_RebuildPT.PersistentTables.wait_till_insync_transition")
self.test_case_scenario.append(test_case_list8)
test_case_list9 = []
test_case_list9.append("mpp.gpdb.tests.storage.lib.dbstate.DbStateClass.check_mirrorintegrity")
self.test_case_scenario.append(test_case_list9)
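In scenario terms, the disabled abort test races an immediate gpstop against a segment-side rebuild (both entries sit in the same scenario list, so TINC runs them concurrently), then restarts and re-validates the cluster. A sequential sketch of the same steps, using the helpers defined earlier; in the real scenario the first two calls run in parallel:

from persistent_rebuild_utility import PTRebuildUtil
from mpp.gpdb.tests.storage.persistent_tables.fault.genFault import Fault

ptutil = PTRebuildUtil()
fault = Fault()
(hostname, port) = ptutil.get_hostname_port_of_segment()
ptutil.persistent_Rebuild(hostname, port, 'Segment')  # racer 1: PT rebuild
fault.stop_db('i')                                    # racer 2: gpstop -ai
fault.start_db()                                      # then restart...
fault.run_recovery()                                  # ...recover incrementally
fault.rebalance_cluster()                             # ...and rebalance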