提交 b03f5fb5 编写于 作者: H Heikki Linnakangas 提交者: Xin Zhang

Remove two tests for old bugs that were specific to persistent tables.

There is no reason to believe that these bugs could reappear in the same
form.
上级 82a25329
......@@ -51,9 +51,10 @@ storage_uao_and_transactionmanagement:
-s storage/transaction_management \
-s storage/uao
# WALREP_FIXME: there are no "persistent" tests anymore. Rename this
# target, and fix concourse pipelines accordingly.
storage_persistent_accessmethods_and_vacuum:
$(TESTER) $(DISCOVER) -t tincrepo/mpp/gpdb/tests/storage \
-s persistent \
-s access_methods \
-s vacuum \
-q "class!=XidlimitsTests"
......
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from time import sleep
from tinctest.lib import run_shell_command
import tinctest
import unittest2 as unittest
from mpp.gpdb.tests.storage.lib.dbstate import DbStateClass
from mpp.gpdb.tests.storage.lib import Database
from tinctest.lib import local_path
from tinctest import TINCTestCase
from mpp.lib.PSQL import PSQL
class Steps(TINCTestCase):
    """Individual steps for the MPP-11179 scenario (driven by Mpp11179).

    Each test_* method is one step of the scenario: toggle the
    dtm_broadcast_prepare fault around a DDL workload, force a checkpoint,
    restart the cluster immediately (exercising crash recovery), and verify
    catalog consistency afterwards.
    """

    # Database all steps operate on; created by the scenario's setUpClass.
    dbname = 'mpp11179_db'

    @classmethod
    def setUpClass(cls):
        tinctest.logger.info('Running Verification...')

    def test_runsql(self):
        """Reset the dtm_broadcast_prepare fault, drop and recreate the test
        table, leaving the fault suspended so the CREATE TABLE's prepare
        broadcast hangs."""
        tinctest.logger.info('Begin a transaction')
        cmd_str = 'gpfaultinjector -p %s -m async -s 1 -f dtm_broadcast_prepare -y reset -o 0' % os.getenv('PGPORT')
        results = {'rc': 0, 'stdout': '', 'stderr': ''}
        run_shell_command(cmd_str, results=results)
        # Fix: previously only the 'suspend' invocation was checked; a failed
        # 'reset' would silently leave a stale fault injected.
        if int(results['rc']) != 0:
            raise Exception('gpfaultinjector failed to reset the fault')
        PSQL.run_sql_file(local_path('drop_table.sql'), dbname=Steps.dbname)
        cmd_str = 'gpfaultinjector -p %s -m async -s 1 -f dtm_broadcast_prepare -y suspend -o 0' % os.getenv('PGPORT')
        results = {'rc': 0, 'stdout': '', 'stderr': ''}
        run_shell_command(cmd_str, results=results)
        if int(results['rc']) != 0:
            raise Exception('gpfaultinjector failed to suspend the fault')
        PSQL.run_sql_file(local_path('create_table.sql'), dbname=Steps.dbname)

    def test_checkpoint(self):
        """Force a checkpoint while the prepare broadcast is suspended."""
        tinctest.logger.info('Issue Checkpoint')
        PSQL.run_sql_file(local_path('checkpoint.sql'), dbname=Steps.dbname)

    def test_gprestart(self):
        """Restart the cluster with an immediate shutdown (gpstop -air),
        forcing crash recovery on the next start."""
        tinctest.logger.info('Restart database after immediate shutdown')
        # Give the suspended transaction time to reach the fault point first.
        sleep(20)
        cmd_str = 'source %s/greenplum_path.sh;%s/bin/gpstop -air' % (os.environ['GPHOME'], os.environ['GPHOME'])
        results = {'rc': 0, 'stdout': '', 'stderr': ''}
        run_shell_command(cmd_str, results=results)
        if int(results['rc']) != 0:
            raise Exception('Gp-Restart failed')

    def test_gpcheckcat(self):
        """Run gpcheckcat against the test database to verify that the
        catalog survived the fault/restart sequence consistently."""
        tinctest.logger.info('Run Checkcat to verify persistent table consistency')
        dbstate = DbStateClass('run_validation')
        dbstate.check_catalog(alldb=False, dbname=Steps.dbname)
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import tinctest
import mpp.gpdb.tests.storage.persistent
from mpp.gpdb.tests.storage.lib import Database
from tinctest.models.scenario import ScenarioTestCase
'''
Automation for MPP-11179
'''
class Mpp11179(ScenarioTestCase):
    """
    @description test cases for MPP-11179
    @created 2013-04-17 10:10:10
    @modified 2013-04-22 17:10:15
    @tags persistent tables schedule_persistent
    @product_version gpdb: [4.1.3.x- main]
    """

    @classmethod
    def setUpClass(cls):
        # Create the dedicated database the scenario steps operate on.
        Database().setupDatabase(dbname='mpp11179_db')

    @classmethod
    def tearDownClass(cls):
        # Drop the scenario database once every step has finished.
        Database().dropDatabase(dbname='mpp11179_db')

    def test_scenario_setup(self):
        # Phase 1: run the fault-injected workload, checkpoint, and restart;
        # phase 2: validate the catalog after recovery.
        tinctest.logger.info('Running test for MPP-11179')
        steps_cls = 'mpp.gpdb.tests.storage.persistent.mpp11179.steps.steps.Steps'
        first_phase = ['%s.%s' % (steps_cls, step)
                       for step in ('test_runsql', 'test_checkpoint', 'test_gprestart')]
        self.test_case_scenario.append(first_phase)
        self.test_case_scenario.append(['%s.test_gpcheckcat' % steps_cls])
SELECT length(description)
FROM pg_description
INNER JOIN pg_proc ON objoid = pg_proc.oid
WHERE proname = 'f';
length
----------
23000002
(1 row)
SELECT length(description)
FROM pg_description
INNER JOIN pg_proc ON objoid = pg_proc.oid
WHERE proname = 'f';
-- start_ignore
drop function if exists f();
-- end_ignore
create or replace function f() returns void as $$
declare begin return; end;
$$ language plpgsql;
CREATE FUNCTION
set gp_test_system_cache_flush_force = plain;
SET
comment on function f() is $$
@long_comment@
$$;
COMMENT
-- start_ignore
drop function if exists f();
-- end_ignore
create or replace function f() returns void as $$
declare begin return; end;
$$ language plpgsql;
set gp_test_system_cache_flush_force = plain;
comment on function f() is $$
@long_comment@
$$;
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import tinctest
from tinctest.models.scenario import ScenarioTestCase
from mpp.models import MPPTestCase
from mpp.models import SQLTestCase
from mpp.lib.gpstart import GpStart
from mpp.lib.gpstop import GpStop
@tinctest.skipLoading('scenario')
class scenario_sql(SQLTestCase):
    # SQL workload step; loads only as part of the mpp16611 scenario.
    sql_dir = 'sql'
    ans_dir = 'sql'

    def get_substitutions(self):
        # Expand @long_comment@ to a ~23 MB string so that the COMMENT ON
        # value is large enough to be stored in the toast table.
        long_comment = 'comment_to_force_toast,' * 1000000
        return {'@long_comment@': long_comment}
@tinctest.skipLoading('scenario')
class scenario_restart(MPPTestCase):
    # Cluster restart step; loads only as part of the mpp16611 scenario.

    def restart(self):
        # GpStop does not accept immediate and restart together, so stop
        # immediately and then start the cluster again explicitly.
        GpStop().run_gpstop_cmd(immediate=True)
        GpStart().run_gpstart_cmd()
class mpp16611(MPPTestCase, ScenarioTestCase):
    """
    @description Test for the RelcacheInvalidate VS persistent table
    @created 2013-02-12 00:00:00
    @modified 2014-06-12 00:00:00
    @tags mpp16160 schedule_persistent echo
    @product_version gpdb: [4.2.5.0- main]
    """

    def test_pt_relation_cache(self):
        """
        The issue of mpp16611 is that persistent table (PT) information
        in Relation cache is lost during heap_insert after fetching it
        from gp_relation_node, if concurrent cache invalidation message
        overflows, and if the heap_insert goes saving values to toast
        table. We should restore the PT info in that case.
        The following is the series of events.
        - Session 1 inserts a row worthy of toasting into a table.
        - After session 1 picks up the gp_relation_node information for
          this table, but prior to relation_open, Session 2 jumps in.
        - Session 2 needs to overflow shared invalidation message, which
          causes Session 1 to rebuild all relcache (and catcache) rebuild
          regardless whether the cache entries actually need to be
          updated. The overflow threshold is defined by MAXNUMMESSAGES
          in sinvaladt.h
        - Session 1 proceeds to relation_open and
          AcceptInvalidationMessages, and during RelationClearRelation,
          it rebuilds its relcache information which erroneously loses
          the gp_relation_node information.
        - Session 1 persists this incorrect gp_relation_node information
          to the xlog during XLogInsert.
        - If we then crash immediately, and rely upon xlog replay during
          crash recovery, we hit the heap_insert_redo error. It seems
          we hit the heap_insert_redo error in segments, but in master it
          just loses the data, which this test shows.
        In this test, we force relcache refresh by gp_test_system_cache_flush_force = plain
        """
        # NOTE(review): the local `package_name` below is never read — `prefix`
        # is built from `self.package_name` (presumably supplied by the TINC
        # base class); verify before removing the local.
        package_name = 'mpp.gpdb.tests.storage.persistent.mpp16611'
        prefix = self.package_name + '.test_mpp16611'
        # Run the workload and check the catalog, then restart the cluster
        # (immediate shutdown -> crash recovery) and check the catalog again.
        for step in ('scenario_sql.test_workload',
                     'scenario_sql.test_check_catalog',
                     'scenario_restart.restart',
                     'scenario_sql.test_check_catalog'):
            self.test_case_scenario.append(['%s.%s' % (prefix, step)])
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册