提交 714a8375 编写于 作者: A Abhijit Subramanya

Fix vacuum and access method tests.

 - Update the answer files for vacuum related tests.
 - Update the SQL used to increase the age of the table in mpp24168 test.
 - Remove tests that use gp_filedump.
上级 d4325e08
......@@ -208,57 +208,6 @@ class AppendonlyChecksumTestCase(ScenarioTestCase, MPPTestCase):
Command("Restore data-file", cmd).run(validateAfter=True)
self.select_table(verify_sql,noncorrupted_checksum=True)
def test_aochecksum_size(self):
    '''
    The following test verifies that the header length is calculated properly when the checksum is on & off
    for small, large and bulk-dense header content.
    PARAMETERS (via data_provider):
    0. Test Name
    1. Sql file to create scenario.
    2. Expected length of the header
    3. Expected type of the header
    STEPS :
    1. Create table of required type of header (bulk, large header_content or small header_content (ao/co)
    2. Find the data file for this relationship
    3. Get the gp_filedump of the data file
    4. Verify expected length of the header
    5. Verify expected type of the header
    @data_provider data_types_provider_aochecksum_size
    '''
    tab_name = self.test_data[0]
    chksum_flag, header_size, header_type = self.test_data[1]
    tinctest.logger.info('=======================================')
    tinctest.logger.info('Starting Test %s' % tab_name)
    tinctest.logger.info('Table Name %s' % tab_name)
    tinctest.logger.info('chksum_flag %s' % chksum_flag)
    tinctest.logger.info('header_size %s' % header_size)
    tinctest.logger.info('header_type %s' % header_type)
    tinctest.logger.info('=======================================')
    self.create_table(tab_name)
    (host, db_path) = self.gpfile.get_host_and_db_path(self.dbname)
    tinctest.logger.info('Hostname=%s data_directory=%s' % (host, db_path))
    tinctest.logger.info("tab_name : %s chksum_flag: %s header_size: %s header_type: %s" % (tab_name, chksum_flag, header_size, header_type))
    relfiles = self.gpfile.get_relfile_list(self.dbname, tab_name, db_path, host)
    # gp_filedump options are the same for every relfile of the table:
    # -M validates checksums; row-oriented (ao) tables also need -O row.
    dump_flags = ''
    if chksum_flag == 'on':
        dump_flags = ' -M'
    if tab_name.endswith('ao'):
        dump_flags = dump_flags + ' -O row '
    for relfile in relfiles:
        tinctest.logger.info('Getting checksum info for table %s with relfilenode %s' % (tab_name, relfile))
        # Run both filedump checks before raising, as the original did.
        found_headerlen = self.gpfile.check_in_filedump(db_path, host, relfile, header_size, dump_flags)
        found_headertype = self.gpfile.check_in_filedump(db_path, host, relfile, header_type, dump_flags)
        if not found_headerlen:
            raise Exception("Checksum validation failed for table %s with relfilenode: %s because header length is not %s" % (tab_name, relfile, header_size))
        if not found_headertype:
            raise Exception("Checksum validation failed for table %s with relfilenode: %s because header type is not %s" % (tab_name, relfile, header_type))
@tinctest.dataProvider('data_provider_for_checksum_corruption')
def test_data_provider_for_checksum_corruption():
data = {
......@@ -271,18 +220,3 @@ def test_data_provider_for_checksum_corruption():
"appendonly_verify_block_checksums_co":['appendonly_verify_block_checksums_co',CONST.EOF,CONST.FIND_CHAR]
}
return data
@tinctest.dataProvider('data_types_provider_aochecksum_size')
def test_data_provider_aochecksum_size():
    # Scenario name -> [checksum flag, expected header length, expected header type];
    # consumed by test_aochecksum_size via the data_provider mechanism.
    return {
        "chksum_on_header_bulkdense_co": [CONST.CHKSUM_ON, CONST.BULKDENSE_HEADER_LEN_WITH_CHKSUM_ON, CONST.BULKDENSE_HEADER_TYPE],
        "chksum_off_header_bulkdense_co": [CONST.CHKSUM_OFF, CONST.BULKDENSE_HEADER_LEN_WITH_CHKSUM_OFF, CONST.BULKDENSE_HEADER_TYPE],
        "chksum_off_header_sml_ao": [CONST.CHKSUM_OFF, CONST.SMALL_HEADER_LEN_WITH_CHKSUM_OFF, CONST.SMALL_HEADER_TYPE],
        "chksum_off_header_sml_co": [CONST.CHKSUM_OFF, CONST.SMALL_HEADER_LEN_WITH_CHKSUM_OFF, CONST.SMALL_HEADER_TYPE],
        "chksum_on_header_sml_ao": [CONST.CHKSUM_ON, CONST.SMALL_HEADER_LEN_WITH_CHKSUM_ON, CONST.SMALL_HEADER_TYPE],
        "chksum_on_header_sml_co": [CONST.CHKSUM_ON, CONST.SMALL_HEADER_LEN_WITH_CHKSUM_ON, CONST.SMALL_HEADER_TYPE],
        "chksum_on_header_large_co": [CONST.CHKSUM_ON, CONST.LARGE_HEADER_LEN_WITH_CHKSUM_ON, CONST.LARGE_HEADER_TYPE],
        "chksum_off_header_large_co": [CONST.CHKSUM_OFF, CONST.LARGE_HEADER_LEN_WITH_CHKSUM_OFF, CONST.LARGE_HEADER_TYPE],
    }
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import tinctest
from mpp.models import SQLTestCase
from mpp.models import MPPTestCase
from tinctest.models.scenario import ScenarioTestCase
from mpp.gpdb.tests.storage.lib.gp_filedump import GpfileTestCase
'''
Test for Delta Compression
'''
@tinctest.skipLoading('scenario')
class DeltaCompressionTestCase(SQLTestCase):
    """
    @gucs gp_create_table_random_default_distribution=off
    @dbname deltagp
    @product_version gpdb: [4.3.3.0-]
    """
    # Directories (relative to this module) holding the test SQL files,
    # the expected answer files, and the generated query output.
    sql_dir = 'sql_gp/'
    ans_dir = 'expected/'
    out_dir = 'output/'
@tinctest.skipLoading('scenario')
class DeltaHelper(MPPTestCase):

    def gp_filedump_check(self, checksum=False):
        """
        Verify via gp_filedump that delta compression was applied where expected.

        For each table whose column values fit in 1-4 delta bytes, require both
        the HAS_DELTA_COMPRESSION marker and the expected "deltas_size: N"
        string in the filedump output; for delta_more_4_byte, require that no
        delta marker appears.

        @param checksum: when True, pass -M so gp_filedump validates block checksums.
        @raise Exception: if a filedump check does not match the expectation.
        """
        gpfile = GpfileTestCase()
        dbname = os.environ.get('PGDATABASE')
        # Idiomatic truthiness test instead of `checksum == True`; the flag
        # strings are passed verbatim to gp_filedump.
        flag = " -M " if checksum else " "
        # table name -> string expected in the filedump output (delta width in bytes)
        table_list = {"delta_1_byte": "deltas_size: 1", "delta_2_byte": "deltas_size: 2",
                      "delta_3_byte": "deltas_size: 3", "delta_4_byte": "deltas_size: 4",
                      "delta_neg_1_byte": "deltas_size: 1", "delta_neg_2_byte": "deltas_size: 2",
                      "delta_neg_3_byte": "deltas_size: 3", "delta_neg_4_byte": "deltas_size: 4"}
        (host, db_path) = gpfile.get_host_and_db_path(dbname)
        # Cases where we expect a delta
        for tablename, expected_size in table_list.items():
            file_list = gpfile.get_relfile_list(dbname, tablename, db_path, host)
            # NOTE(review): the original skips index 0 of the relfile list —
            # presumably the empty base segment file; confirm before changing.
            for relfile in file_list[1:]:
                tinctest.logger.info('Running compression check for table %s relfilenode %s' % (tablename, relfile))
                has_delta = gpfile.check_in_filedump(db_path, host, relfile, 'HAS_DELTA_COMPRESSION', flag)
                is_byte = gpfile.check_in_filedump(db_path, host, relfile, expected_size, flag)
                if not has_delta or not is_byte:
                    raise Exception("Delta Compression not applied to relation with relfilenode: %s " % relfile)
        # Cases where we are not expecting a delta
        tablename = 'delta_more_4_byte'
        file_list = gpfile.get_relfile_list(dbname, tablename, db_path, host)
        for relfile in file_list[1:]:
            tinctest.logger.info('Running compression check for table %s relfilenode %s' % (tablename, relfile))
            delta = gpfile.check_in_filedump(db_path, host, relfile, 'HAS_DELTA_COMPRESSION', flag)
            if delta:
                raise Exception("Delta Compression not expected for %s" % relfile)
class DeltaScenarioTestCase(ScenarioTestCase, MPPTestCase):
    """
    @gucs gp_create_table_random_default_distribution=off
    """

    def test_delta_byte_check_post_434(self):
        '''
        @product_version gpdb: [4.3.4.0-]
        '''
        # Run the SQL test case first, then the checksum-enabled filedump
        # verification, serially as one scenario.
        steps = [
            'mpp.gpdb.tests.storage.access_methods.deltacompression.test_sanity_deltacompression.DeltaCompressionTestCase',
            ('mpp.gpdb.tests.storage.access_methods.deltacompression.test_sanity_deltacompression.DeltaHelper.gp_filedump_check', {'checksum': True}),
        ]
        self.test_case_scenario.append(steps, serial=True)
......@@ -37,7 +37,7 @@ class RelfrozenxidUpdateTestCase(MPPTestCase):
#doing multiple selects to increase the age of the table
for x in range(0, 150):
PSQL.run_sql_command("select * from test_table", flags ='-q -t')
PSQL.run_sql_command("select age(relfrozenxid) from gp_dist_random('pg_class') where relname = 'test_table'", flags ='-q -t')
PSQL.run_sql_command("drop table if exists table_master; \
......
......@@ -14,7 +14,7 @@ COMMIT
VACUUM
2: COMMIT;
COMMIT
3: SELECT COUNT(*) FROM reindex_toast_heap WHERE a = 1500;
3: SELECT COUNT(*) FROM reindex_toast_heap WHERE a = '1500';
count
-----
0
......
......@@ -8,6 +8,6 @@ DELETE FROM reindex_toast_heap WHERE b % 4 = 0 ;
1: COMMIT;
2<:
2: COMMIT;
3: SELECT COUNT(*) FROM reindex_toast_heap WHERE a = 1500;
3: SELECT COUNT(*) FROM reindex_toast_heap WHERE a = '1500';
3: INSERT INTO reindex_toast_heap VALUES (0);
3: SELECT 1 AS relfilenode_same_on_all_segs from gp_dist_random('pg_class') WHERE relname = 'idx_btree_reindex_toast_heap' GROUP BY relfilenode having count(*) = (SELECT count(*) FROM gp_segment_configuration WHERE role='p' AND content > -1);
# PGXS makefile for the lockrelease test extension module.
MODULES = lockrelease
DATA_built = install.sql
# Force a 64-bit gcc build regardless of the environment defaults.
override CC:=gcc
override CFLAGS:=-m64
# Locate the PostgreSQL extension build infrastructure via pg_config.
PG_CONFIG = pg_config
PGXS := $(shell $(PG_CONFIG) --pgxs)
include $(PGXS)
......@@ -490,7 +490,7 @@ INSERT 0 99
]]></pre>
<trigger><![CDATA[
vacuum full pg_attribute;
ERROR: fault triggered, fault name:'repair_frag_end' fault type:'error' (faultinjector.c:xxx)
ERROR: fault triggered, fault name:'repair_frag_end' fault type:'error'
]]></trigger>
<post><![CDATA[
select count(*) from vacuum_full_large_partition_heap;
......
......@@ -53,8 +53,8 @@ select pg_relation_size('ivfheap') from gp_dist_random('gp_id') where gp_segment
]]></pre>
<trigger><![CDATA[
vacuum full vfheap;
ERROR: fault triggered, fault name:'vacuum_full_after_truncate' fault type:'checkpoint_and_panic' (faultinjector.c:xxx)
ERROR: could not temporarily connect to one or more segments (SOMEFILE:SOMEFUNC)
ERROR: fault triggered, fault name:'vacuum_full_after_truncate' fault type:'checkpoint_and_panic'
ERROR: could not connect to segment: initialization of segworker group failed (cdbgang.c:xxx)
]]></trigger>
<post><![CDATA[
select pg_relation_size('vfheap') from gp_dist_random('gp_id') where gp_segment_id = 0;
......
......@@ -49,8 +49,8 @@ select pg_relation_size('ivfheap') from gp_dist_random('gp_id') where gp_segment
]]></pre>
<trigger><![CDATA[
vacuum full vfheap;
psql:/Users/haradh1/dev/tincrepo/private/haradh1/mpp-24055/vacuum/scenario/output/out/heap_crash_before_truncate_trigger.sql:7: ERROR: fault triggered, fault name:'vacuum_full_before_truncate' fault type:'panic' (faultinjector.c:623) (seg0 haradh1-mac.local:40000 pid=15655) (cdbdisp.c:1526)
ERROR: could not temporarily connect to one or more segments (SOMEFILE:SOMEFUNC)
ERROR: fault triggered, fault name:'vacuum_full_before_truncate' fault type:'panic' (seg0 haradh1-mac.local:40000 pid=15655) (cdbdisp.c:1526)
ERROR: could not connect to segment: initialization of segworker group failed (cdbgang.c:xxx)
]]></trigger>
<post><![CDATA[
select pg_relation_size('vfheap') from gp_dist_random('gp_id') where gp_segment_id = 0;
......
......@@ -49,7 +49,7 @@ select pg_relation_size('ivfheap') from gp_dist_random('gp_id') where gp_segment
]]></pre>
<trigger><![CDATA[
vacuum full vfheap;
psql:/Users/haradh1/dev/tincrepo/private/haradh1/mpp-24055/vacuum/scenario/output/out/heap_mastercrash_before_truncate_trigger.sql:7: PANIC: fault triggered, fault name:'vacuum_full_before_truncate' fault type:'panic' (faultinjector.c:630)
PANIC: fault triggered, fault name:'vacuum_full_before_truncate' fault type:'panic'
server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
......
......@@ -49,7 +49,7 @@ select pg_relation_size('ivfheap') from gp_dist_random('gp_id') where gp_segment
]]></pre>
<trigger><![CDATA[
vacuum full vfheap;
psql:/Users/haradh1/dev/tincrepo/private/haradh1/mpp-24055/vacuum/scenario/output/out/heap_mastererror_before_truncate_trigger.sql:7: ERROR: fault triggered, fault name:'vacuum_full_before_truncate' fault type:'error' (faultinjector.c:649)
ERROR: fault triggered, fault name:'vacuum_full_before_truncate' fault type:'error'
]]></trigger>
<post><![CDATA[
......
......@@ -50,8 +50,8 @@ select pg_relation_size('ivfheap') from gp_dist_random('gp_id') where gp_segment
]]></pre>
<trigger><![CDATA[
vacuum full vfheap;
psql:/Users/haradh1/dev/tincrepo/private/haradh1/mpp-24055/vacuum/scenario/output/out/heap_crash_before_truncate_trigger.sql:7: ERROR: fault triggered, fault name:'vacuum_full_before_truncate' fault type:'panic' (faultinjector.c:623) (seg0 haradh1-mac.local:40000 pid=15655) (cdbdisp.c:1526)
ERROR: could not temporarily connect to one or more segments (SOMEFILE:SOMEFUNC)
ERROR: fault triggered, fault name:'vacuum_full_before_truncate' fault type:'panic' (seg0 haradh1-mac.local:40000 pid=15655) (cdbdisp.c:1526)
ERROR: could not connect to segment: initialization of segworker group failed (cdbgang.c:xxx)
]]></trigger>
<post><![CDATA[
select pg_relation_size('vfheap') from gp_dist_random('gp_id') where gp_segment_id = 0;
......
......@@ -30,12 +30,6 @@ select pg_relation_size((select segrelid from pg_appendonly where relid = 'vfao'
65536
(1 row)
select pg_relation_size((select segidxid from pg_appendonly where relid = 'vfao'::regclass)) from gp_dist_random('gp_id') where gp_segment_id = 0;
pg_relation_size
------------------
65536
(1 row)
vacuum full vfao;
VACUUM
select pg_relation_size((select segrelid from pg_appendonly where relid = 'vfao'::regclass)) from gp_dist_random('gp_id') where gp_segment_id = 0;
......@@ -43,10 +37,3 @@ select pg_relation_size((select segrelid from pg_appendonly where relid = 'vfao'
------------------
32768
(1 row)
select pg_relation_size((select segidxid from pg_appendonly where relid = 'vfao'::regclass)) from gp_dist_random('gp_id') where gp_segment_id = 0;
pg_relation_size
------------------
65536
(1 row)
......@@ -22,8 +22,6 @@ create index ivfao on vfao(b, c);
select junkloop('vfao', 300);
select pg_relation_size((select segrelid from pg_appendonly where relid = 'vfao'::regclass)) from gp_dist_random('gp_id') where gp_segment_id = 0;
select pg_relation_size((select segidxid from pg_appendonly where relid = 'vfao'::regclass)) from gp_dist_random('gp_id') where gp_segment_id = 0;
vacuum full vfao;
select pg_relation_size((select segrelid from pg_appendonly where relid = 'vfao'::regclass)) from gp_dist_random('gp_id') where gp_segment_id = 0;
select pg_relation_size((select segidxid from pg_appendonly where relid = 'vfao'::regclass)) from gp_dist_random('gp_id') where gp_segment_id = 0;
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册