提交 9dec48aa 编写于 作者: L Larry Hamel 提交者: Chris Hajas

Tighten the criteria for partition validation (#1258)

* Tighten the criteria for partition validation

Add additional case when source and destination attributes are different.

Authors: Larry Hamel, Marbin Tan, Chris Hajas
上级 6b4e5a56
......@@ -1832,7 +1832,7 @@ Feature: gptransfer tests
And there is a file "input_file" with tables "gptest.public.sales_1_prt_2_2_prt_2, gptest.public.sales_1_prt_2"
When the user runs "gptransfer -f input_file --partition-transfer --source-port $GPTRANSFER_SOURCE_PORT --source-host $GPTRANSFER_SOURCE_HOST --source-user $GPTRANSFER_SOURCE_USER --dest-user $GPTRANSFER_DEST_USER --dest-port $GPTRANSFER_DEST_PORT --dest-host $GPTRANSFER_DEST_HOST --source-map-file $GPTRANSFER_MAP_FILE"
Then gptransfer should return a return code of 2
And gptransfer should print Max levels of partition is not same to stdout
And gptransfer should print Max level of partition is not same to stdout
@partition_transfer
@prt_transfer_15
......@@ -1858,7 +1858,7 @@ Feature: gptransfer tests
And there is a file "input_file" with tables "gptest.public.employee_1_prt_main, gptest.public.employee_1_prt_main"
When the user runs "gptransfer -f input_file --partition-transfer --source-port $GPTRANSFER_SOURCE_PORT --source-host $GPTRANSFER_SOURCE_HOST --source-user $GPTRANSFER_SOURCE_USER --dest-user $GPTRANSFER_DEST_USER --dest-port $GPTRANSFER_DEST_PORT --dest-host $GPTRANSFER_DEST_HOST --source-map-file $GPTRANSFER_MAP_FILE"
Then gptransfer should return a return code of 2
And gptransfer should print Partition column attributes is different to stdout
And gptransfer should print Partition column attributes are different to stdout
@partition_transfer
@prt_transfer_17
......@@ -1871,7 +1871,7 @@ Feature: gptransfer tests
And there is a file "input_file" with tables "gptest.public.sales_1_prt_2_2_prt_asia, gptest.public.sales_1_prt_2_2_prt_asia"
When the user runs "gptransfer -f input_file --partition-transfer --source-port $GPTRANSFER_SOURCE_PORT --source-host $GPTRANSFER_SOURCE_HOST --source-user $GPTRANSFER_SOURCE_USER --dest-user $GPTRANSFER_DEST_USER --dest-port $GPTRANSFER_DEST_PORT --dest-host $GPTRANSFER_DEST_HOST --source-map-file $GPTRANSFER_MAP_FILE"
Then gptransfer should return a return code of 2
And gptransfer should print Partition column attributes is different to stdout
And gptransfer should print Partition column attributes are different to stdout
@partition_transfer
@prt_transfer_18
......@@ -1897,7 +1897,7 @@ Feature: gptransfer tests
And there is a file "input_file" with tables "gptest.public.sales_1_prt_2_2_prt_asia, gptest.public.sales_1_prt_2_2_prt_asia"
When the user runs "gptransfer -f input_file --partition-transfer --source-port $GPTRANSFER_SOURCE_PORT --source-host $GPTRANSFER_SOURCE_HOST --source-user $GPTRANSFER_SOURCE_USER --dest-user $GPTRANSFER_DEST_USER --dest-port $GPTRANSFER_DEST_PORT --dest-host $GPTRANSFER_DEST_HOST --source-map-file $GPTRANSFER_MAP_FILE"
Then gptransfer should return a return code of 2
And gptransfer should print Partition column attributes is different to stdout
And gptransfer should print Partition column attributes are different to stdout
@partition_transfer
@prt_transfer_20
......@@ -1982,7 +1982,7 @@ Feature: gptransfer tests
And there is a file "input_file" with tables "gptest.public.employee_1_prt_boys, gptest.public.employee_1_prt_boys"
When the user runs "gptransfer -f input_file --partition-transfer --source-port $GPTRANSFER_SOURCE_PORT --source-host $GPTRANSFER_SOURCE_HOST --source-user $GPTRANSFER_SOURCE_USER --dest-user $GPTRANSFER_DEST_USER --dest-port $GPTRANSFER_DEST_PORT --dest-host $GPTRANSFER_DEST_HOST --source-map-file $GPTRANSFER_MAP_FILE -a"
Then gptransfer should return a return code of 2
And gptransfer should print Partition column attributes is different to stdout
And gptransfer should print Number of partition columns is different to stdout
@partition_transfer
@prt_transfer_26
......
......@@ -19,6 +19,17 @@ class GpTestCase(unittest.TestCase):
[p.stop() for p in self.patches]
self.mock_objs = []
def add_setup(setup=None, teardown=None):
    """Decorate test functions to add additional setup/teardown contexts.

    setup -- optional callable invoked with the test instance before the test
    teardown -- optional callable invoked with the test instance after the
                test runs, even when the test raises, mirroring
                unittest.TestCase.tearDown semantics so cleanup always happens
    """
    def decorate_function(test):
        def wrapper(self):
            if setup:
                setup(self)
            try:
                # propagate the test's return value (harmless for unittest
                # methods, useful for plain callables)
                return test(self)
            finally:
                # run teardown even if the test body raised
                if teardown:
                    teardown(self)
        return wrapper
    return decorate_function
# hide unittest dependencies here
def run_tests():
......
......@@ -2,7 +2,9 @@ import imp
import os
from mock import *
from gp_unittest import *
from gparray import GpDB, GpArray
from gppylib.db.dbconn import UnexpectedRowsError
from pygresql import pgdb
class GpTransfer(GpTestCase):
......@@ -12,11 +14,28 @@ class GpTransfer(GpTestCase):
# if we had a gptransfer.py, this is equivalent to:
# import gptransfer
# self.subject = gptransfer
gptransfer_file = os.path.abspath(os.path.dirname(__file__) +
"/../../../gptransfer")
self.subject = imp.load_source('gptransfer',
gptransfer_file)
self.subject.logger = Mock(spec=['log', 'warn', 'info', 'debug', 'error'])
gptransfer_file = os.path.abspath(os.path.dirname(__file__) + "/../../../gptransfer")
self.subject = imp.load_source('gptransfer', gptransfer_file)
self.subject.logger = Mock(spec=['log', 'warn', 'info', 'debug', 'error', 'warning'])
self.gparray = self.createGpArrayWith2Primary2Mirrors()
self.db_connection = MagicMock(spec=["__exit__", "close", "__enter__"])
self.cursor = MagicMock(spec=pgdb.pgdbCursor)
self.db_singleton = Mock()
self.apply_patches([
patch('os.environ', new={}),
patch('gppylib.operations.dump.GpArray.initFromCatalog', return_value=self.gparray),
patch('gptransfer.connect', return_value=self.db_connection),
patch('gptransfer.getUserDatabaseList', return_value=[["my_first_database"],["my_second_database"]]),
patch('gppylib.db.dbconn.connect', return_value=self.db_connection),
patch('gptransfer.WorkerPool', return_value=Mock()),
patch('gptransfer.doesSchemaExist', return_value=True),
patch('gptransfer.dropSchemaIfExist'),
patch('gptransfer.execSQL', new=self.cursor),
patch('gptransfer.execSQLForSingletonRow', new=self.db_singleton),
patch("gppylib.commands.unix.FileDirExists.remote", return_value=True)
])
# We have a GIGANTIC class that uses 31 arguments, so pre-setting this
# here
......@@ -52,6 +71,51 @@ class GpTransfer(GpTestCase):
quote='foo',
table_transfer_set_total='foo')
self.GpTransfer_options_defaults = dict(
analyze=False,
base_port=8000,
batch_size=2,
databases=[],
delimiter=',',
dest_database=None,
dest_host='127.0.0.1',
dest_port=5432,
dest_user='gpadmin',
drop=False,
dry_run=False,
enable_test=False,
exclude_input_file=None,
exclude_tables=[],
exclusive_lock=False,
force_standard_mode=False,
format='CSV',
full=False,
input_file=None,
interactive=True,
last_port=-1,
logfileDirectory=None,
max_gpfdist_instances=1,
max_line_length=10485760,
no_final_count_validation=False,
partition_transfer=False,
quiet=None,
quote='\x01',
schema_only=False,
skip_existing=False,
source_host='127.0.0.1',
source_map_file=None,
source_port=5432,
source_user='gpadmin',
sub_batch_size=25,
tables=[],
timeout=300,
truncate=False,
validator=None,
verbose=None,
wait_time=3,
work_base_dir='/home/gpadmin/',
)
@patch('gptransfer.TableValidatorFactory', return_value=Mock())
@patch('gptransfer.execSQLForSingletonRow', side_effect=[['MYDATE'],
['MY"DATE'],
......@@ -104,5 +168,291 @@ class GpTransfer(GpTestCase):
self.assertEqual(1, len(self.subject.logger.method_calls))
self.assertEqual(expected_distribution, result_distribution)
def test__validates_good_partition(self):
    """GpTransfer construction succeeds when source and destination partition metadata agree (default canned answers)."""
    options = self.setup_partition_validation()
    self.cursor.side_effect = CursorSideEffect().cursor_side_effect
    self.subject.GpTransfer(Mock(**options), [])
def test__validate_bad_partition_not_leaf(self):
    """A destination table that has child relations (not a leaf partition) is rejected."""
    options = self.setup_partition_validation()
    additional = {
        # second pg_class lookup returns multiple relations, i.e. the table has children
        "select relname from pg_class r": ["many", "relations"],
    }
    self.cursor.side_effect = CursorSideEffect(additional).cursor_side_effect
    with self.assertRaisesRegexp(Exception, "Destination table "):
        self.subject.GpTransfer(Mock(**options), [])
def test__validate_bad_partition_different_number_columns(self):
    """Source and destination with a different number of columns fail column-layout validation."""
    options = self.setup_partition_validation()
    additional = {
        # second column query yields two rows instead of the baseline one
        "select ordinal_position, is_nullable, data_type, character_maximum_length,": [[1, "t", "my_data_type", 255, 16, 1024, 1024, 1, 1024, "my_interval_type", "my_udt_name"],
                                                                                       [2, "t", "my_data_type", 255, 16, 1024, 1024, 1, 1024, "my_interval_type", "my_udt_name"]],
    }
    self.cursor.side_effect = CursorSideEffect(additional).cursor_side_effect
    with self.assertRaisesRegexp(Exception, "has different column layout or types"):
        self.subject.GpTransfer(Mock(**options), [])
def test__validate_bad_partition_different_column_type(self):
    """A mismatched column data type between source and destination fails column-layout validation."""
    options = self.setup_partition_validation()
    additional = {
        # second column query reports "my_new_data_type" instead of the baseline "my_data_type"
        "select ordinal_position, is_nullable, data_type, character_maximum_length,": [[1, "t", "my_new_data_type", 255, 16, 1024, 1024, 1, 1024, "my_interval_type", "my_udt_name"]],
    }
    self.cursor.side_effect = CursorSideEffect(additional).cursor_side_effect
    with self.assertRaisesRegexp(Exception, "has different column layout or types"):
        self.subject.GpTransfer(Mock(**options), [])
def test__validate_bad_partition_different_max_levels(self):
    """A different maximum partition level between the two sides fails partition-criteria validation."""
    options = self.setup_partition_validation()
    additional = {
        # the second max-level query answers 2 instead of the baseline 1
        "select max(p1.partitionlevel)": [2],
    }
    self.db_singleton.side_effect = SingletonSideEffect(additional).singleton_side_effect
    with self.assertRaisesRegexp(Exception, "has different partition criteria from destination table"):
        self.subject.GpTransfer(Mock(**options), [])
    log_messages = [args[0][0] for args in self.subject.logger.error.call_args_list]
    self.assertIn("Max level of partition is not same between", log_messages[0])
def test__validate_bad_partition_different_values_of_attributes(self):
    """Same partition type and column count, but different paratts values, fails validation."""
    options = self.setup_partition_validation()
    additional = {
        # paratts "3 4" differs from the baseline "my_paratts"
        "select parkind, parlevel, parnatts, paratts": [["my_parkind", 1, "my_parnatts", "3 4"]],
    }
    self.cursor.side_effect = CursorSideEffect(additional).cursor_side_effect
    with self.assertRaisesRegexp(Exception, "has different partition criteria from destination table"):
        self.subject.GpTransfer(Mock(**options), [])
    log_messages = [args[0][0] for args in self.subject.logger.error.call_args_list]
    self.assertIn("Partition type or key is different between", log_messages[1])
    self.assertIn("Partition column attributes are different at level", log_messages[0])
def test__validate_bad_partition_different_parent_kind(self):
    """A different parkind (partition type) between the two sides fails validation."""
    options = self.setup_partition_validation()
    additional = {
        # parkind "different_parkind" differs from the baseline "my_parkind"
        "select parkind, parlevel, parnatts, paratts": [["different_parkind", 1, "my_parnatts", "my_paratts"]],
    }
    self.cursor.side_effect = CursorSideEffect(additional).cursor_side_effect
    with self.assertRaisesRegexp(Exception, "has different partition criteria from destination table"):
        self.subject.GpTransfer(Mock(**options), [])
    log_messages = [args[0][0] for args in self.subject.logger.error.call_args_list]
    self.assertIn("Partition type or key is different between", log_messages[1])
    self.assertIn("Partition type is different at level", log_messages[0])
def test__validate_bad_partition_different_number_of_attributes(self):
    """A different parnatts (number of partition columns) between the two sides fails validation."""
    options = self.setup_partition_validation()
    additional = {
        # parnatts 2 differs from the baseline "my_parnatts"
        "select parkind, parlevel, parnatts, paratts": [["my_parkind", 1, 2, "my_paratts"]],
    }
    self.cursor.side_effect = CursorSideEffect(additional).cursor_side_effect
    with self.assertRaisesRegexp(Exception, "has different partition criteria from destination table"):
        self.subject.GpTransfer(Mock(**options), [])
    log_messages = [args[0][0] for args in self.subject.logger.error.call_args_list]
    self.assertIn("Partition type or key is different between", log_messages[1])
    self.assertIn("Number of partition columns is different at level ", log_messages[0])
def test__validate_bad_partition_different_partition_values(self):
    """A default subpartition on one side (parisdefault 't') fails partition-value validation."""
    options = self.setup_partition_validation()
    additional = {
        "select n.nspname, c.relname": [["not_public", "not_my_table", ""],["public", "my_table", ""]],
        # parisdefault flips to "t": one side's subpartition is a default partition
        "select parisdefault, parruleord, parrangestartincl,": ["t", "1", "t", "t", 100, 10, "", ""],
    }
    self.db_singleton.side_effect = SingletonSideEffect(additional).singleton_side_effect
    with self.assertRaisesRegexp(Exception, "has different partition criteria from destination table"):
        self.subject.GpTransfer(Mock(**options), [])
    log_messages = [args[0][0] for args in self.subject.logger.error.call_args_list]
    self.assertIn("One of the subpartition table is a default partition", log_messages[0])
    self.assertIn("Partition value is different in the partition hierarchy between", log_messages[1])
def test__validate_bad_partition_unknown_type(self):
    """A partitiontype other than the supported kinds raises an unknown-partitioning-type error."""
    options = self.setup_partition_validation()
    my_singleton = SingletonSideEffect()
    # override the canned partitiontype answer directly on the fake
    my_singleton.values["select partitiontype"] = ["unknown"]
    self.db_singleton.side_effect = my_singleton.singleton_side_effect
    with self.assertRaisesRegexp(Exception, "Unknown partitioning type "):
        self.subject.GpTransfer(Mock(**options), [])
def test__validate_bad_partition_different_list_values(self):
    """With list partitioning, differing list values between the two sides fail validation."""
    options = self.setup_partition_validation()
    additional = {
        # final field "different" differs from the baseline ""
        "select parisdefault, parruleord, parrangestartincl,": ["f", "1", "t", "t", 100, 10, "", "different"],
    }
    my_singleton = SingletonSideEffect(additional)
    my_singleton.values["select partitiontype"] = [["list"]]
    self.db_singleton.side_effect = my_singleton.singleton_side_effect
    with self.assertRaisesRegexp(Exception, "has different partition criteria from destination table"):
        self.subject.GpTransfer(Mock(**options), [])
    log_messages = [args[0][0] for args in self.subject.logger.error.call_args_list]
    self.assertIn("List partition value is different between", log_messages[0])
    self.assertIn("Partition value is different in the partition hierarchy between", log_messages[1])
def test__validate_bad_partition_different_range_values(self):
    """Each range attribute mismatch (start-inclusive, end-inclusive, start value, end value) fails validation."""
    self.run_range_partition_value({"select parisdefault, parruleord, parrangestartincl,": ["f", "1", "f", "t", 100, 10, "", "different"]})
    self.run_range_partition_value({"select parisdefault, parruleord, parrangestartincl,": ["f", "1", "t", "f", 999, 10, "", "different"]})
    self.run_range_partition_value({"select parisdefault, parruleord, parrangestartincl,": ["f", "1", "t", "t", 100, 999, "", "different"]})
    self.run_range_partition_value({"select parisdefault, parruleord, parrangestartincl,": ["f", "1", "t", "t", 100, 10, 999, "different"]})
def test__validate_bad_partition_different_parent_partition(self):
    """A range-value mismatch appearing only at the parent level fails validation and logs the parent level."""
    options = self.setup_partition_validation()
    multi = {
        # first two answers match; the third (parent-level query) differs (999 vs 100)
        "select parisdefault, parruleord, parrangestartincl,": [["f", "1", "t", "t", 100, 10, "", ""], ["f", "1", "t", "t", 100, 10, "", ""], ["f", "1", "t", "t", 999, 10, "", ""]],
    }
    singleton_side_effect = SingletonSideEffect(multi_list=multi)
    self.db_singleton.side_effect = singleton_side_effect.singleton_side_effect
    with self.assertRaisesRegexp(Exception, "has different partition criteria from destination table"):
        self.subject.GpTransfer(Mock(**options), [])
    log_messages = [args[0][0] for args in self.subject.logger.error.call_args_list]
    self.assertIn("Range partition value is different between source partition table", log_messages[0])
    self.assertIn("Partitions have different parents at level", log_messages[1])
####################################################################################################################
# End of tests, start of private methods/objects
####################################################################################################################
def run_range_partition_value(self, additional):
    """Drive one range-partition validation failure using the given singleton override.

    additional -- dict mapping a SQL-fragment key to the second-occurrence
                  answer fed to SingletonSideEffect
    """
    options = self.setup_partition_validation()
    self.db_singleton.side_effect = SingletonSideEffect(additional).singleton_side_effect
    with self.assertRaisesRegexp(Exception, "has different partition criteria from destination table"):
        self.subject.GpTransfer(Mock(**options), [])
    log_messages = [args[0][0] for args in self.subject.logger.error.call_args_list]
    self.assertIn("Range partition value is different between", log_messages[0])
    self.assertIn("Partition value is different in the partition hierarchy between", log_messages[1])
def createGpArrayWith2Primary2Mirrors(self):
    """Construct a GpArray fixture: one master, two primaries, and two mirrors."""
    # segment configuration strings in the format consumed by GpDB.initFromString
    segment_strings = [
        "1|-1|p|p|s|u|mdw|mdw|5432|None|/data/master||/data/master/base/10899,/data/master/base/1,/data/master/base/10898,/data/master/base/25780,/data/master/base/34782",
        "2|0|p|p|s|u|sdw1|sdw1|40000|41000|/data/primary0||/data/primary0/base/10899,/data/primary0/base/1,/data/primary0/base/10898,/data/primary0/base/25780,/data/primary0/base/34782",
        "3|1|p|p|s|u|sdw2|sdw2|40001|41001|/data/primary1||/data/primary1/base/10899,/data/primary1/base/1,/data/primary1/base/10898,/data/primary1/base/25780,/data/primary1/base/34782",
        "4|0|m|m|s|u|sdw2|sdw2|50000|51000|/data/mirror0||/data/mirror0/base/10899,/data/mirror0/base/1,/data/mirror0/base/10898,/data/mirror0/base/25780,/data/mirror0/base/34782",
        "5|1|m|m|s|u|sdw1|sdw1|50001|51001|/data/mirror1||/data/mirror1/base/10899,/data/mirror1/base/1,/data/mirror1/base/10898,/data/mirror1/base/25780,/data/mirror1/base/34782",
    ]
    return GpArray([GpDB.initFromString(s) for s in segment_strings])
def setup_partition_validation(self):
    """Prepare environment, map/input files, and default mocks for a partition-transfer run.

    Writes a source map file and an input file under /tmp, installs the
    happy-path cursor/singleton side effects (individual tests override
    them), and returns an options dict for constructing GpTransfer in
    partition-transfer mode.
    """
    os.environ["GPHOME"] = "my_gp_home"
    SOURCE_MAP_FILENAME = "/tmp/gptransfer_test_source_map"
    with open(SOURCE_MAP_FILENAME, "w") as src_map_file:
        src_map_file.write("sdw1,12700\nsdw2,12700")
    INPUT_FILENAME = "/tmp/gptransfer_test"
    with open(INPUT_FILENAME, "w") as src_map_file:
        src_map_file.write("my_first_database.public.my_table")
    # happy-path canned database answers
    self.cursor.side_effect = CursorSideEffect().cursor_side_effect
    self.db_singleton.side_effect = SingletonSideEffect().singleton_side_effect
    options = {}
    options.update(self.GpTransfer_options_defaults)
    options.update(
        partition_transfer=True,
        input_file=INPUT_FILENAME,
        source_map_file=SOURCE_MAP_FILENAME,
        base_port=15432,
        max_line_length=32768,
        work_base_dir="/tmp",
        source_port=45432,
        dest_port=15432,
    )
    return options
class CursorSideEffect:
    """Fake for cursor-returning SQL calls: maps SQL-fragment keys to canned rows.

    The first query matching a key is answered from the baseline values;
    any later match for the same key is answered from the second values,
    which an `additional` dict may override per key.
    """

    def __init__(self, additional=None):
        self.first_values = {
            "n.nspname, c.relname, c.relstorage": [["public", "my_table", ""]],
            "select relname from pg_class r": ["my_relname"],
            "select ordinal_position, is_nullable, data_type, character_maximum_length,": [[1, "t", "my_data_type", 255, 16, 1024, 1024, 1, 1024, "my_interval_type", "my_udt_name"]],
            "select parkind, parlevel, parnatts, paratts": [["my_parkind", 1, "my_parnatts", "my_paratts"]],
            "SELECT fsname FROM pg_catalog.pg_filespace": ["public"],
        }
        # per-key hit counters, all starting at zero
        self.counters = {}
        for key in self.first_values.keys():
            self.counters[key] = 0
        # second occurrences answer from a (possibly overridden) copy
        self.second_values = self.first_values.copy()
        if additional:
            self.second_values.update(additional)

    def cursor_side_effect(self, *args):
        """Return a FakeCursor for the first key found in args[1:], else None."""
        queries = args[1:]
        for key in self.first_values.keys():
            if not any(key in query for query in queries):
                continue
            if self.has_called(key):
                return FakeCursor(self.second_values[key])
            return FakeCursor(self.first_values[key])
        return None

    def has_called(self, key):
        """Record a hit for key; True once the key has matched more than once."""
        self.counters[key] = self.counters[key] + 1
        return self.counters[key] > 1
class FakeCursor:
    """Minimal stand-in for a database cursor: iterable canned rows plus rowcount."""

    def __init__(self, my_list):
        # fall back to a single empty-string row when no rows are supplied
        rows = my_list if my_list else [[""]]
        self.list = rows
        self.rowcount = len(rows)

    def __iter__(self):
        return iter(self.list)

    def close(self):
        # nothing to release for canned data
        pass
class SingletonSideEffect:
    """Fake for single-row SQL calls: maps SQL-fragment keys to canned rows.

    Each key holds a list of sequential answers; repeated queries matching
    the same key cycle through that list, so the source and destination
    sides of a comparison can be given differing answers.

    additional -- per key, one extra answer used for the second occurrence
    multi_list -- per key, an explicit sequence of answers appended in order
    """

    def __init__(self, additional=None, multi_list=None):
        self.values = {
            "select partitiontype": ["range"],
            "select max(p1.partitionlevel)": [1],
            "select schemaname, tablename from pg_catalog.pg_partitions": ["public", "my_table"],
            "select c.oid": ["oid1", "oid1"],
            "select parisdefault, parruleord, parrangestartincl,": ["f", "1", "t", "t", 100, 10, "", ""],
            "select n.nspname, c.relname": ["public", "my_table"]
        }
        self.counters = dict((key, 0) for key in self.values.keys())

        # make values into list to accommodate multiple sequential values
        # (items() instead of the Python-2-only iteritems() so this fake
        # also runs under Python 3)
        self.values = dict((key, [value]) for (key, value) in self.values.items())

        for key in self.values.keys():
            if additional and key in additional:
                # second occurrence of this query answers differently
                self.values[key].append(additional[key])
            if multi_list and key in multi_list:
                # explicit answers for further occurrences
                self.values[key].extend(multi_list[key])

    def singleton_side_effect(self, *args):
        """Return the next canned answer for the first key found in args, else None."""
        for key in self.values.keys():
            for arg in args:
                if key in arg:
                    value_list = self.values[key]
                    # cycle through the sequential answers for this key
                    result = value_list[self.counters[key] % len(value_list)]
                    self.counters[key] += 1
                    return result
        return None
if __name__ == '__main__':
run_tests()
......@@ -828,7 +828,7 @@ def drop_existing_schema_on_system(host, port, user, schema, databases=None):
conn = connect(url)
schema_exists = dropSchemaIfExist(conn, schema)
conn.close()
if schema_exists:
if schema_exists: # todo this smells like an error since it can exit early. Please investigate
return True
# --------------------------------------------------------------------------
......@@ -3632,16 +3632,16 @@ class GpTransfer(object):
self._check_leaf_partition_set(table_pair)
if not self._has_same_column_types(table_pair):
raise Exception('Source paritition table %s has different column layout or types from '
raise Exception('Source partition table %s has different column layout or types from '
'destination partition table %s' % (str(table_pair.source), str(table_pair.dest)))
if not self._has_same_partition_criteria(table_pair):
raise Exception('Source paritition table %s has different partition criteria from '
raise Exception('Source partition table %s has different partition criteria from '
'destination table %s' % (str(table_pair.source), str(table_pair.dest)))
def _has_same_column_types(self, table_pair):
'''
Colums at the same ordinal_position of two partition tables need to be of same type
'''
"""
Columns at the same ordinal_position of two partition tables need to be of same type
"""
src_tbl_columns = self._get_table_columns(table_pair.source, self._options.source_host, self._options.source_port, self._options.source_user)
dest_tbl_columns = self._get_table_columns(table_pair.dest, self._options.dest_host, self._options.dest_port, self._options.dest_user)
......@@ -3694,7 +3694,7 @@ class GpTransfer(object):
logger.debug('Verifying that partition table transfer pair has same partition criteria')
source_dest_info = 'source partition table %s and destination partition table %s' % (str(table_pair.source), str(table_pair.dest))
if not self._has_same_partition_levels(table_pair):
logger.error('Max levels of partition is not same between %s' % source_dest_info)
logger.error('Max level of partition is not same between %s' % source_dest_info)
return False
if not self._has_same_partition_type_and_key_columns(table_pair):
logger.error('Partition type or key is different between %s' % source_dest_info)
......@@ -3722,7 +3722,10 @@ class GpTransfer(object):
return True
src_schema, src_table = self._get_parentpartable(src_db, src_schema, src_table, self._options.source_host, self._options.source_port, self._options.source_user)
dest_schema, dest_table = self._get_parentpartable(dest_db, dest_schema, dest_table, self._options.dest_host, self._options.dest_port, self._options.dest_user)
return self._has_same_parent_partition_value(level - 1, src_db, src_schema, src_table, dest_db, dest_schema, dest_table)
result = self._has_same_parent_partition_value(level - 1, src_db, src_schema, src_table, dest_db, dest_schema, dest_table)
if not result:
logger.error("Partitions have different parents at level: %d" % (level - 1))
return result
def _get_parentpartable(self, db, schema, table, host, port, user):
"""
......@@ -3784,14 +3787,16 @@ class GpTransfer(object):
for level_key in source_partition_column_info:
if source_partition_column_info[level_key]['parkind'] != dest_partition_column_info[level_key]['parkind']:
logger.error('Partition type is different at level %s between %s' % (level_key, source_dest_info))
return False
if source_partition_column_info[level_key]['parnatts'] != dest_partition_column_info[level_key]['parnatts']:
logger.error('Number of partition columns is different at level %s between %s' % (level_key, source_dest_info))
return False
# this handles multi column partition, sort them before compare, in case same partition columns but in random order
source_paratts = [att.strip() for att in source_partition_column_info[level_key]['paratts'].split(' ')]
dest_paratts = [att.strip() for att in dest_partition_column_info[level_key]['paratts'].split(' ')]
if sorted(source_paratts) != sorted(dest_paratts):
logger.error('Partition column attributes is different at level %s between %s' %
logger.error('Partition column attributes are different at level %s between %s' %
(level_key, source_dest_info))
return False
return True
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册