From 0dcbbebbe532264c1a2416d63e7dec1ea3661204 Mon Sep 17 00:00:00 2001 From: Karen Huddleston Date: Wed, 24 May 2017 17:49:13 -0700 Subject: [PATCH] Unify backup/restore tests to work with Data Domain This is part of the effort to unify our backup/restore tests into a single suite. * Adds infrastructure to set up DDBoost on the client and clean up the server after completion * Adds tests for DDBoost-specific options * Adds test coverage from the TINC suite that was not included in behave Signed-off-by: Chris Hajas --- gpMgmt/Makefile.behave | 7 +- .../backup_and_restore_backups.feature | 65 +++- .../backup_and_restore_restores.feature | 61 +++- gpMgmt/test/behave/mgmt_utils/backups.feature | 85 ++++- .../test/behave/mgmt_utils/restores.feature | 16 + .../behave/mgmt_utils/steps/mgmt_utils.py | 308 ++++++++++++------ gpMgmt/test/behave_utils/utils.py | 7 +- 7 files changed, 431 insertions(+), 118 deletions(-) diff --git a/gpMgmt/Makefile.behave b/gpMgmt/Makefile.behave index 2919c25372..03fd35f3ff 100644 --- a/gpMgmt/Makefile.behave +++ b/gpMgmt/Makefile.behave @@ -6,6 +6,7 @@ BEHAVE_BIN=$(PYTHONSRC_INSTALL)/bin/behave PYTHONSRC_INSTALL_SITE=$(shell find $(PYLIB_SRC_EXT)/install/lib -name "site-packages") PYTHONSRC_INSTALL_PYTHON_PATH=$(PYTHONPATH):$(PYTHONSRC_INSTALL_SITE) TAR?=$(shell which tar) +PEXPECT_LIB=$(GPHOME)/bin/lib $(BEHAVE_BIN): TAR=$(TAR) make -C bin $(BEHAVE_BIN) @@ -15,9 +16,9 @@ $(BEHAVE_BIN): behave: $(BEHAVE_BIN) @echo "Running behave on management scripts..." @if [ -n """$(flags)""" ]; then \ - PYTHONPATH=$(PYTHONSRC_INSTALL_PYTHON_PATH):$(GPMGMT_SRC) python $(BEHAVE_BIN) $(GPMGMT_SRC)/test/behave/* -s -k $(flags) 2>&1 ; \ + PYTHONPATH=$(PYTHONSRC_INSTALL_PYTHON_PATH):$(GPMGMT_SRC):$(PEXPECT_LIB) python $(BEHAVE_BIN) $(GPMGMT_SRC)/test/behave/* -s -k $(flags) 2>&1 ; \ elif [ -n """$(tags)""" ]; then \ - PYTHONPATH=$(PYTHONSRC_INSTALL_PYTHON_PATH):$(GPMGMT_SRC) python $(BEHAVE_BIN) $(GPMGMT_SRC)/test/behave/* -s -k --tags=$(tags) 2>&1 ; \ + PYTHONPATH=$(PYTHONSRC_INSTALL_PYTHON_PATH):$(GPMGMT_SRC):$(PEXPECT_LIB) python $(BEHAVE_BIN) $(GPMGMT_SRC)/test/behave/* -s -k --tags=$(tags) 2>&1 ; \ else \ - echo "Please specify tags=tagname or flags=[behave flags]"; \ + echo "Please specify tags=tagname or flags=[behave flags]"; \ fi diff --git a/gpMgmt/test/behave/mgmt_utils/backup_and_restore_backups.feature b/gpMgmt/test/behave/mgmt_utils/backup_and_restore_backups.feature index c7ef229c85..12a220646f 100644 --- a/gpMgmt/test/behave/mgmt_utils/backup_and_restore_backups.feature +++ b/gpMgmt/test/behave/mgmt_utils/backup_and_restore_backups.feature @@ -1,10 +1,15 @@ @backup_and_restore_backups Feature: Validate command line arguments + @nbuonly @nbusetup77 Scenario: Setup to load NBU libraries Given the test suite is initialized for Netbackup "7.7" - And the netbackup storage params have been parsed + + @ddonly + @ddboostsetup + Scenario: Setup DDBoost configuration + Given the test suite is initialized for DDBoost Scenario: 1 Dirty table list check on recreating a table with same data and contents Given the backup test is initialized with database "bkdb1" @@ -18,6 +23,7 @@ And "public.ao_table" is marked as dirty in dirty_list file @nbupartI + @ddpartI Scenario: 2 Simple Incremental Backup Given the backup test is initialized with database "bkdb2" And there is a "ao" table "public.ao_table" in "bkdb2" with data @@ -43,7 +49,7 @@ Feature: Validate command line arguments And data for partition table "part_external" with partition level "0" is
distributed across all segments on "bkdb2" When the user runs "gpcrondump -a -x bkdb2" Then gpcrondump should return a return code of 0 - And gpcrondump should print "Validating disk space" to stdout + And gpcrondump should print the correct disk space check message And the full backup timestamp from gpcrondump is stored And the state files are generated under " " for stored "full" timestamp And the "last_operation" files are generated under " " for stored "full" timestamp @@ -108,6 +114,7 @@ Feature: Validate command line arguments And all files for full backup have been removed in path "/tmp/5" @nbupartI + @ddpartI Scenario: 5a Full Backup and Restore Given the backup test is initialized with database "bkdb5a" And there is a "heap" table "public.heap_table" in "bkdb5a" with data @@ -141,6 +148,7 @@ Feature: Validate command line arguments And verify that the "status" file in " " dir contains "reading triggers" @nbupartI + @ddpartI Scenario: 6 Metadata-only restore Given the backup test is initialized with database "bkdb6" And schema "schema_heap" exists in "bkdb6" @@ -151,6 +159,7 @@ Feature: Validate command line arguments And the schemas "schema_heap" do not exist in "bkdb6" @nbupartI + @ddpartI Scenario: 7 Metadata-only restore with global objects (-G) Given the backup test is initialized with database "bkdb7" And schema "schema_heap" exists in "bkdb7" @@ -162,6 +171,7 @@ Feature: Validate command line arguments And the user runs "psql -c 'DROP ROLE "foo%userWITHCAPS"' bkdb7" And the schemas "schema_heap" do not exist in "bkdb7" + @ddpartI Scenario: 8 gpdbrestore -L with Full Backup Given the backup test is initialized with database "bkdb8" And there is a "heap" table "public.heap_table" in "bkdb8" with data @@ -173,6 +183,7 @@ Feature: Validate command line arguments And verify that the "report" file in " " dir contains "Backup Type: Full" @nbupartI + @ddpartI Scenario: 11 Backup and restore with -G only Given the backup test is initialized with database "bkdb11" And there is a "heap" table "public.heap_table" in "bkdb11" with data @@ -211,6 +222,7 @@ Feature: Validate command line arguments And the timestamp from gpcrondump is stored @nbupartI + @ddpartI Scenario: 14 Full Backup with option -t and Restore Given the backup test is initialized with database "bkdb14" And there is a "heap" table "public.heap_table" in "bkdb14" with data @@ -224,6 +236,7 @@ Feature: Validate command line arguments And verify that the "report" file in " " dir contains "Backup Type: Full" @nbupartI + @ddpartI Scenario: 15 Full Backup with option -T and Restore Given the backup test is initialized with database "bkdb15" And there is a "heap" table "public.heap_table" in "bkdb15" with data @@ -236,6 +249,7 @@ Feature: Validate command line arguments And the timestamp from gpcrondump is stored @nbupartI + @ddpartI Scenario: 16 Full Backup with option --exclude-table-file and Restore Given the backup test is initialized with database "bkdb16" And there is a "heap" table "public.heap_table" in "bkdb16" with data @@ -249,6 +263,7 @@ Feature: Validate command line arguments And verify that the "report" file in " " dir contains "Backup Type: Full" @nbupartI + @ddpartI Scenario: 17 Full Backup with option --table-file and Restore Given the backup test is initialized with database "bkdb17" And there is a "heap" table "public.heap_table" in "bkdb17" with data @@ -293,6 +308,7 @@ Feature: Validate command line arguments And "dirty_list" file should be created under " " @nbupartI + @ddpartI Scenario: 20 No plan file 
generated Given the backup test is initialized with database "bkdb20" And there is a "ao" partition table "public.ao_part_table" in "bkdb20" with data @@ -314,6 +330,7 @@ Feature: Validate command line arguments And the timestamp from gpcrondump is stored @nbupartI + @ddpartI Scenario: 22 Simple Incremental Backup with AO/CO statistics w/ filter Given the backup test is initialized with database "bkdb22" And there is a "ao" table "public.ao_table" in "bkdb22" with data @@ -381,6 +398,7 @@ Feature: Validate command line arguments And all the data from "bkdb24" is saved for verification @nbupartI + @ddpartI Scenario: 25 Non compressed incremental backup Given the backup test is initialized with database "bkdb25" And schema "testschema" exists in "bkdb25" @@ -463,6 +481,7 @@ Feature: Validate command line arguments And "dirty_list" file should be created under " " And all the data from "bkdb28" is saved for verification + @ddpartI Scenario: 29 Verify gpdbrestore -s option works with full backup Given the backup test is initialized with database "bkdb29" And database "bkdb29-2" is dropped and recreated @@ -479,6 +498,7 @@ Feature: Validate command line arguments And the timestamp from gpcrondump is stored And the database "bkdb29-2" does not exist + @ddpartI Scenario: 30 Verify gpdbrestore -s option works with incremental backup Given the backup test is initialized with database "bkdb30" And database "bkdb30-2" is dropped and recreated @@ -538,6 +558,7 @@ Feature: Validate command line arguments Then the dump timestamp for "bkdb33, bkdb33-2" are different @nbupartI + @ddpartI Scenario: 34 gpdbrestore with --table-file option Given the backup test is initialized with database "bkdb34" And there is a "ao" table "public.ao_table" in "bkdb34" with data @@ -549,6 +570,7 @@ Feature: Validate command line arguments And all the data from "bkdb34" is saved for verification @nbupartI + @ddpartI Scenario: 35 Incremental restore with extra full backup Given the backup test is initialized with database "bkdb35" And there is a "heap" table "public.heap_table" in "bkdb35" with data @@ -577,6 +599,7 @@ Feature: Validate command line arguments And all the data from "bkdb36" is saved for verification @nbupartI + @ddpartI Scenario: 37 Full backup with -T option Given the database is running And the database "fullbkdb37" does not exist @@ -590,6 +613,7 @@ Feature: Validate command line arguments And all the data from "fullbkdb37" is saved for verification @nbupartI + @ddpartI Scenario: 38 gpdbrestore with -T option Given the backup test is initialized with database "bkdb38" And there is a "heap" table "public.heap_table" in "bkdb38" with data @@ -601,6 +625,7 @@ Feature: Validate command line arguments And all the data from "bkdb38" is saved for verification @nbupartI + @ddpartI Scenario: 39 Full backup and restore with -T and --truncate Given the backup test is initialized with database "bkdb39" And there is a "heap" table "public.heap_table" in "bkdb39" with data @@ -621,6 +646,7 @@ Feature: Validate command line arguments And table "public.heap_table" is dropped in "bkdb40" @nbupartII + @ddpartII Scenario: 41 Full backup -T with truncated table Given the backup test is initialized with database "bkdb41" And there is a "ao" partition table "public.ao_part_table" in "bkdb41" with data @@ -648,6 +674,7 @@ Feature: Validate command line arguments When table "public.ao_index_table" is dropped in "bkdb43" @nbupartII + @ddpartII Scenario: 44 Incremental restore with table filter Given the backup test is initialized 
with database "bkdb44" And there is a "heap" table "public.heap_table" in "bkdb44" with data @@ -715,6 +742,7 @@ Feature: Validate command line arguments And the timestamp from gpcrondump is stored And all the data from "bkdb49" is saved for verification + @ddpartII Scenario: 50 gpdbrestore -b option should display the timestamps in sorted order Given the backup test is initialized with database "bkdb50" And there is a "heap" table "public.heap_table" in "bkdb50" with data @@ -811,6 +839,7 @@ Feature: Validate command line arguments And the timestamp from gpcrondump is stored @nbupartII + @ddpartII Scenario: 56 Incremental table filter gpdbrestore with noplan option Given the backup test is initialized with database "bkdb56" And there is a "ao" partition table "public.ao_part_table" in "bkdb56" with data @@ -828,6 +857,7 @@ Feature: Validate command line arguments And the timestamp from gpcrondump is stored @nbupartII + @ddpartII Scenario: 57 gpdbrestore list_backup option Given the backup test is initialized with database "bkdb57" And there is a "heap" table "public.heap_table" in "bkdb57" with data @@ -853,6 +883,7 @@ Feature: Validate command line arguments And the timestamp from gpcrondump is stored @nbupartII + @ddpartII Scenario: 59 gpdbrestore list_backup option with full timestamp Given the backup test is initialized with database "bkdb59" And there is a "heap" table "public.heap_table" in "bkdb59" with data @@ -899,6 +930,7 @@ Feature: Validate command line arguments And the timestamp from gpcrondump is stored in a list @nbupartII + @ddpartII Scenario: 61 Incremental Backup and Restore with -t filter for Full Given the backup test is initialized with database "bkdb61" And the prefix "foo" is stored @@ -1007,6 +1039,7 @@ Feature: Validate command line arguments And all the data from "bkdb64" is saved for verification @nbupartII + @ddpartII Scenario: 65 Full Backup with option -T and non-existant table Given the backup test is initialized with database "bkdb65" And there is a "heap" table "public.heap_table" in "bkdb65" with data @@ -1177,6 +1210,7 @@ Feature: Validate command line arguments And the user runs "psql -c 'DROP ROLE foo_user' bkdb71" @nbupartII + @ddpartII Scenario: 72 Redirected Restore Full Backup and Restore without -e option Given the backup test is initialized with database "bkdb72" And the database "bkdb72-2" does not exist @@ -1188,6 +1222,7 @@ Feature: Validate command line arguments And the timestamp from gpcrondump is stored @nbupartII + @ddpartII Scenario: 73 Full Backup and Restore with -e option Given the backup test is initialized with database "bkdb73" And the database "bkdb73-2" does not exist @@ -1199,6 +1234,7 @@ Feature: Validate command line arguments And the timestamp from gpcrondump is stored @nbupartII + @ddpartII Scenario: 74 Incremental Backup and Redirected Restore Given the backup test is initialized with database "bkdb74" And the database "bkdb74-2" does not exist @@ -1213,6 +1249,7 @@ Feature: Validate command line arguments And all the data from "bkdb74" is saved for verification @nbupartII + @ddpartII Scenario: 75 Full backup and redirected restore with -T Given the backup test is initialized with database "bkdb75" And the database "bkdb75-2" does not exist @@ -1225,6 +1262,7 @@ Feature: Validate command line arguments And all the data from "bkdb75" is saved for verification @nbupartII + @ddpartII Scenario: 76 Full backup and redirected restore with -T and --truncate Given the backup test is initialized with database "bkdb76" And there is 
a "ao" table "public.ao_index_table" in "bkdb76" with data @@ -1234,6 +1272,7 @@ Feature: Validate command line arguments And all the data from "bkdb76" is saved for verification @nbupartII + @ddpartII Scenario: 77 Incremental redirected restore with table filter Given the backup test is initialized with database "bkdb77" And the database "bkdb77-2" does not exist @@ -1250,6 +1289,7 @@ Feature: Validate command line arguments And all the data from "bkdb77" is saved for verification @nbupartII + @ddpartII Scenario: 78 Full Backup and Redirected Restore with --prefix option Given the backup test is initialized with database "bkdb78" And the prefix "foo" is stored @@ -1263,6 +1303,7 @@ Feature: Validate command line arguments And there should be dump files under " " with prefix "foo" @nbupartII + @ddpartII Scenario: 79 Full Backup and Redirected Restore with --prefix option for multiple databases Given the backup test is initialized with database "bkdb79" And the prefix "foo" is stored @@ -1380,6 +1421,7 @@ Feature: Validate command line arguments And all the data from "bkdb89" is saved for verification @nbupartII + @ddpartII Scenario: 90 Writable Report/Status Directory Full Backup and Restore without --report-status-dir option Given the backup test is initialized with database "bkdb90" And there are no report files in "master_data_directory" @@ -1392,6 +1434,7 @@ Feature: Validate command line arguments And the timestamp from gpcrondump is stored @nbupartII + @ddpartII Scenario: 91 Writable Report/Status Directory Full Backup and Restore with --report-status-dir option Given the backup test is initialized with database "bkdb91" And there is a "heap" table "public.heap_table" in "bkdb91" with data @@ -1423,6 +1466,7 @@ Feature: Validate command line arguments And the user runs command "chmod -R 555 /tmp/custom_timestamps/db_dumps" @nbupartII + @ddpartII Scenario: 94 Filtered Full Backup with Partition Table Given the backup test is initialized with database "bkdb94" And there is a "heap" table "public.heap_table" in "bkdb94" with data @@ -1433,6 +1477,7 @@ Feature: Validate command line arguments And all the data from "bkdb94" is saved for verification @nbupartIII + @ddpartIII Scenario: 95 Filtered Incremental Backup with Partition Table Given the backup test is initialized with database "bkdb95" And there is a "heap" table "public.heap_table" in "bkdb95" with data @@ -1445,6 +1490,7 @@ Feature: Validate command line arguments And all the data from "bkdb95" is saved for verification @nbupartIII + @ddpartIII Scenario: 96 gpdbrestore runs ANALYZE on restored table only Given the backup test is initialized with database "bkdb96" And there is a "heap" table "public.heap_table" in "bkdb96" with data @@ -1459,6 +1505,7 @@ Feature: Validate command line arguments And the user deletes rows from the table "heap_table" of database "bkdb96" where "column1" is "1088" @nbupartIII + @ddpartIII Scenario: 97 Full Backup with multiple -S option and Restore Given the backup test is initialized with database "bkdb97" And schema "schema_heap, schema_ao, testschema" exists in "bkdb97" @@ -1472,6 +1519,7 @@ Feature: Validate command line arguments And verify that the "report" file in " " dir contains "Backup Type: Full" @nbupartIII + @ddpartIII Scenario: 98 Full Backup with option -S and Restore Given the backup test is initialized with database "bkdb98" And schema "schema_heap, schema_ao" exists in "bkdb98" @@ -1484,6 +1532,7 @@ Feature: Validate command line arguments And verify that the "report" file in " " 
dir contains "Backup Type: Full" @nbupartIII + @ddpartIII Scenario: 99 Full Backup with option -s and Restore Given the backup test is initialized with database "bkdb99" And schema "schema_heap, schema_ao" exists in "bkdb99" @@ -1496,6 +1545,7 @@ Feature: Validate command line arguments And verify that the "report" file in " " dir contains "Backup Type: Full" @nbupartIII + @ddpartIII Scenario: 100 Full Backup with option --exclude-schema-file and Restore Given the backup test is initialized with database "bkdb100" And schema "schema_heap, schema_ao, testschema" exists in "bkdb100" @@ -1510,6 +1560,7 @@ Feature: Validate command line arguments And verify that the "report" file in " " dir contains "Backup Type: Full" @nbupartIII + @ddpartIII Scenario: 101 Full Backup with option --schema-file and Restore Given the backup test is initialized with database "bkdb101" And schema "schema_heap, schema_ao, testschema" exists in "bkdb101" @@ -1524,6 +1575,7 @@ Feature: Validate command line arguments And verify that the "report" file in " " dir contains "Backup Type: Full" @nbupartIII + @ddpartIII Scenario: 106 Full Backup and Restore with option --change-schema Given the backup test is initialized with database "bkdb106" And schema "schema_heap, schema_ao, schema_new" exists in "bkdb106" @@ -1565,6 +1617,7 @@ Feature: Validate command line arguments And "statistics" file should be created under " " @nbupartIII + @ddpartIII Scenario: 109 Backup and restore with statistics and table filters Given the backup test is initialized with database "bkdb109" And there is a "heap" table "public.heap_table" in "bkdb109" with data @@ -1586,6 +1639,7 @@ Feature: Validate command line arguments And the timestamp from gpcrondump is stored @nbupartIII + @ddpartIII Scenario: 111 Full Backup with option --schema-file with prefix option and Restore Given the backup test is initialized with database "bkdb111" And the prefix "foo" is stored @@ -1609,6 +1663,7 @@ Feature: Validate command line arguments And the timestamp from gpcrondump is stored @nbupartIII + @ddpartIII Scenario: 113 Simple Full Backup with AO/CO statistics w/ filter schema Given the backup test is initialized with database "bkdb113" And schema "schema_ao, testschema" exists in "bkdb113" @@ -1622,6 +1677,7 @@ Feature: Validate command line arguments And the timestamp from gpcrondump is stored @nbupartIII + @ddpartIII Scenario: 114 Restore with --redirect option should not rely on existance of dumped database Given the backup test is initialized with database "bkdb114" When the user runs "gpcrondump -a -x bkdb114" @@ -1655,6 +1711,7 @@ Feature: Validate command line arguments And the timestamp from gpcrondump is stored @nbupartIII + @ddpartIII Scenario: 117 Schema level restore with gpdbrestore -S option for views, sequences, and functions Given the user runs "psql -f test/behave/mgmt_utils/steps/data/schema_level_test_workload.sql template1" When the user runs "gpcrondump -a -x schema_level_test_db" @@ -1686,6 +1743,7 @@ Feature: Validate command line arguments And the timestamp from gpcrondump is stored @nbupartIII + @ddpartIII Scenario: 122 gpcrondump with --exclude-table-file option where table name, schema name and database name contains special character Given the backup test is initialized for special characters And a list of files "122_ao,122_heap" of tables "$SP_CHAR_SCHEMA.$SP_CHAR_AO,$SP_CHAR_SCHEMA.$SP_CHAR_HEAP" in "$SP_CHAR_DB" exists for validation @@ -1708,6 +1766,7 @@ Feature: Validate command line arguments And the timestamp from 
gpcrondump is stored @nbupartIII + @ddpartIII Scenario: 125 gpcrondump with --schema-file option when schema name and database name contains special character Given the backup test is initialized for special characters When the user runs "gpcrondump -a -x "$SP_CHAR_DB" --schema-file test/behave/mgmt_utils/steps/data/special_chars/schema-file.txt" @@ -1742,6 +1801,7 @@ Feature: Validate command line arguments And the timestamp from gpcrondump is stored @nbupartIII + @ddpartIII Scenario: 131 gpcrondump with --incremental option when table name, schema name and database name contains special character Given the backup test is initialized for special characters When the user runs "gpcrondump -a -x "$SP_CHAR_DB"" @@ -1753,6 +1813,7 @@ Feature: Validate command line arguments When the user runs command "psql -f test/behave/mgmt_utils/steps/data/special_chars/select_from_special_table.sql "$SP_CHAR_DB" > /tmp/131_special_table_data.ans" @nbupartIII + @ddpartIII Scenario: 132 gpdbrestore with --redirect option with special db name, and all table name, schema name and database name contain special character Given the backup test is initialized for special characters When the user runs "gpcrondump -a -x "$SP_CHAR_DB"" diff --git a/gpMgmt/test/behave/mgmt_utils/backup_and_restore_restores.feature b/gpMgmt/test/behave/mgmt_utils/backup_and_restore_restores.feature index c34c729731..53c9be6bf3 100644 --- a/gpMgmt/test/behave/mgmt_utils/backup_and_restore_restores.feature +++ b/gpMgmt/test/behave/mgmt_utils/backup_and_restore_restores.feature @@ -1,10 +1,10 @@ @backup_and_restore_restores Feature: Validate command line arguments + @nbuonly @nbusetup77 Scenario: Setup to load NBU libraries Given the test suite is initialized for Netbackup "7.7" - And the netbackup storage params have been parsed Scenario: 1 Dirty table list check on recreating a table with same data and contents Given the old timestamps are read from json @@ -13,6 +13,7 @@ Feature: Validate command line arguments And verify that plan file has latest timestamp for "public.ao_table" @nbupartI + @ddpartI Scenario: 2 Simple Incremental Backup Given the old timestamps are read from json When the user runs gpdbrestore -e with the stored timestamp @@ -60,6 +61,7 @@ Feature: Validate command line arguments And gpdbrestore should print "-R is not supported for restore with incremental timestamp" to stdout @nbupartI + @ddpartI Scenario: 5a Full Backup and Restore Given the old timestamps are read from json When the user runs gpdbrestore -e with the stored timestamp @@ -77,6 +79,7 @@ Feature: Validate command line arguments And verify that there is an index "my_unique_index" in "bkdb5a" @nbupartI + @ddpartI Scenario: 6 Metadata-only restore Given the old timestamps are read from json When the user runs gpdbrestore -e with the stored timestamp and options "-m" @@ -86,6 +89,7 @@ Feature: Validate command line arguments And tables in "bkdb6" should not contain any data @nbupartI + @ddpartI Scenario: 7 Metadata-only restore with global objects (-G) Given the old timestamps are read from json When the user runs gpdbrestore -e with the stored timestamp and options "-m -G" @@ -96,6 +100,7 @@ Feature: Validate command line arguments And verify that a role "foo%userWITHCAPS" exists in database "bkdb7" And the user runs "psql -c 'DROP ROLE "foo%userWITHCAPS"' bkdb7" + @ddpartI Scenario: 8 gpdbrestore -L with Full Backup Given the old timestamps are read from json When the user runs gpdbrestore -e with the stored timestamp and options "-L" @@ -104,6 +109,7 @@ 
Feature: Validate command line arguments And gpdbrestore should print "Table public.heap_table" to stdout @nbupartI + @ddpartI Scenario: 11 Backup and restore with -G only Given the old timestamps are read from json When the user runs gpdbrestore -e with the stored timestamp and options "-G only" @@ -123,6 +129,7 @@ Feature: Validate command line arguments Then the user runs valgrind with "gp_restore_agent" and options "--gp-c /bin/gunzip -s --post-data-schema-only --target-dbid 1 -d bkdb13" @nbupartI + @ddpartI Scenario: 14 Full Backup with option -t and Restore Given the old timestamps are read from json When the user runs gpdbrestore -e with the stored timestamp @@ -131,6 +138,7 @@ Feature: Validate command line arguments And verify that there is no table "public.ao_part_table" in "bkdb14" @nbupartI + @ddpartI Scenario: 15 Full Backup with option -T and Restore Given the old timestamps are read from json When the user runs gpdbrestore -e with the stored timestamp @@ -140,6 +148,7 @@ Feature: Validate command line arguments And verify that there is no table "public.heap_table" in "bkdb15" @nbupartI + @ddpartI Scenario: 16 Full Backup with option --exclude-table-file and Restore Given the old timestamps are read from json When the user runs gpdbrestore -e with the stored timestamp @@ -149,6 +158,7 @@ Feature: Validate command line arguments And verify that there is no table "public.heap_table" in "bkdb16" @nbupartI + @ddpartI Scenario: 17 Full Backup with option --table-file and Restore Given the old timestamps are read from json When the user runs gpdbrestore -e with the stored timestamp @@ -166,6 +176,7 @@ Feature: Validate command line arguments And the plan file for scenario "19" is validated against "data/bar_plan1" @nbupartI + @ddpartI Scenario: 20 No plan file generated Given the old timestamps are read from json When the user runs gpdbrestore -e with the stored timestamp @@ -180,6 +191,7 @@ Feature: Validate command line arguments And tables names in database "bkdb21" should be identical to stored table names in file "part_table_names" @nbupartI + @ddpartI Scenario: 22 Simple Incremental Backup with AO/CO statistics w/ filter Given the old timestamps are read from json When the user runs gpdbrestore -e with the stored timestamp and options "--noaostats" @@ -206,6 +218,7 @@ Feature: Validate command line arguments And verify that the tuple count of all appendonly tables are consistent in "bkdb24" @nbupartI + @ddpartI Scenario: 25 Non compressed incremental backup Given the old timestamps are read from json Then the user runs gpdbrestore -e with the stored timestamp @@ -242,6 +255,7 @@ Feature: Validate command line arguments And verify that the data of "3" tables in "bkdb28" is validated after restore And verify that the tuple count of all appendonly tables are consistent in "bkdb28" + @ddpartI Scenario: 29 Verify gpdbrestore -s option works with full backup Given the old timestamps are read from json When the user runs "gpdbrestore -e -s bkdb29 -a" @@ -250,6 +264,7 @@ Feature: Validate command line arguments And verify that the tuple count of all appendonly tables are consistent in "bkdb29" And verify that database "bkdb29-2" does not exist + @ddpartI Scenario: 30 Verify gpdbrestore -s option works with incremental backup Given the old timestamps are read from json When the user runs "gpdbrestore -e -s bkdb30 -a" @@ -286,6 +301,7 @@ Feature: Validate command line arguments And verify that the tuple count of all appendonly tables are consistent in "bkdb33-2" @nbupartI + @ddpartI 
Scenario: 34 gpdbrestore with --table-file option Given the old timestamps are read from json When the user runs gpdbrestore -e with the stored timestamp and options "--table-file /tmp/table_file_foo" @@ -297,6 +313,7 @@ Feature: Validate command line arguments Then the file "/tmp/table_file_foo" is removed from the system @nbupartI + @ddpartI Scenario: 35 Incremental restore with extra full backup Given the old timestamps are read from json When the user runs gpdbrestore -e with the stored timestamp @@ -315,6 +332,7 @@ Feature: Validate command line arguments Then the file "/tmp/ext_tab" is removed from the system @nbupartI + @ddpartI Scenario: 37 Full backup with -T option Given the old timestamps are read from json When the user runs gpdbrestore -e with the stored timestamp and options "-T public.ao_index_table" @@ -324,6 +342,7 @@ Feature: Validate command line arguments And verify that there is a "ao" table "public.ao_index_table" in "fullbkdb37" with data @nbupartI + @ddpartI Scenario: 38 gpdbrestore with -T option Given the old timestamps are read from json When the user runs gpdbrestore -e with the stored timestamp and options "-T public.ao_index_table -a" @@ -333,6 +352,7 @@ Feature: Validate command line arguments And verify that there is a "ao" table "public.ao_index_table" in "bkdb38" with data @nbupartI + @ddpartI Scenario: 39 Full backup and restore with -T and --truncate Given the old timestamps are read from json And the backup test is initialized with database "bkdb39" @@ -355,6 +375,7 @@ Feature: Validate command line arguments And verify that there is a "heap" table "public.heap_table" in "bkdb40" with data @nbupartII + @ddpartII Scenario: 41 Full backup -T with truncated table Given the old timestamps are read from json And the backup test is initialized with database "bkdb41" @@ -380,6 +401,7 @@ Feature: Validate command line arguments And verify that there is a "ao" table "public.ao_index_table" in "bkdb43" with data @nbupartII + @ddpartII Scenario: 44 Incremental restore with table filter Given the old timestamps are read from json When the user runs gpdbrestore -e with the stored timestamp and options "-T public.ao_table -T public.co_table" @@ -420,6 +442,7 @@ Feature: Validate command line arguments And verify that the data of "12" tables in "bkdb49" is validated after restore And verify that the tuple count of all appendonly tables are consistent in "bkdb49" + @ddpartII Scenario: 50 gpdbrestore -b option should display the timestamps in sorted order Given the old timestamps are read from json Then the user runs gpdbrestore -e with the date directory @@ -495,6 +518,7 @@ Feature: Validate command line arguments And verify that tables "public.ao_part_table1_1_prt_p1_2_prt_1, public.ao_part_table1_1_prt_p2_2_prt_1" in "bkdb56" has no rows @nbupartII + @ddpartII Scenario: 57 gpdbrestore list_backup option Given the old timestamps are read from json And the backup test is initialized with database "bkdb57" @@ -515,6 +539,7 @@ Feature: Validate command line arguments And gpdbrestore should print "Cannot specify -T and --list-backup together" to stdout @nbupartII + @ddpartII Scenario: 59 gpdbrestore list_backup option with full timestamp Given the old timestamps are read from json And the backup test is initialized with database "bkdb59" @@ -538,6 +563,7 @@ Feature: Validate command line arguments And close all opened pipes @nbupartII + @ddpartII Scenario: 61 Incremental Backup and Restore with -t filter for Full Given the old timestamps are read from json When the user 
runs gpdbrestore -e with the stored timestamp and options "--prefix=foo" @@ -571,6 +597,7 @@ Feature: Validate command line arguments And verify that there is no table "public.heap_table" in "bkdb64" @nbupartII + @ddpartII Scenario: 65 Full Backup with option -T and non-existant table Given the old timestamps are read from json When the user runs gpdbrestore -e with the stored timestamp @@ -703,6 +730,7 @@ Feature: Validate command line arguments And the file "/tmp/71_describe_multi_byte_char_after" is removed from the system @nbupartII + @ddpartII Scenario: 72 Redirected Restore Full Backup and Restore without -e option Given the old timestamps are read from json When the user runs gpdbrestore -e with the stored timestamp and options "--redirect=bkdb72-2" @@ -711,6 +739,7 @@ Feature: Validate command line arguments And check that there is a "ao" table "public.ao_part_table" in "bkdb72-2" with same data from "bkdb72" @nbupartII + @ddpartII Scenario: 73 Full Backup and Restore with -e option Given the old timestamps are read from json When the user runs gpdbrestore -e with the stored timestamp and options "--redirect=bkdb73-2" @@ -719,6 +748,7 @@ Feature: Validate command line arguments And check that there is a "ao" table "public.ao_part_table" in "bkdb73-2" with same data from "bkdb73" @nbupartII + @ddpartII Scenario: 74 Incremental Backup and Redirected Restore Given the old timestamps are read from json When the user runs gpdbrestore -e with the stored timestamp and options "--redirect=bkdb74-2" @@ -726,6 +756,7 @@ Feature: Validate command line arguments And verify that the data of "11" tables in "bkdb74-2" is validated after restore from "bkdb74" @nbupartII + @ddpartII Scenario: 75 Full backup and redirected restore with -T Given the old timestamps are read from json When the user runs gpdbrestore -e with the stored timestamp and options "-T public.ao_index_table --redirect=bkdb75-2" @@ -733,6 +764,7 @@ Feature: Validate command line arguments And check that there is a "ao" table "public.ao_index_table" in "bkdb75-2" with same data from "bkdb75" @nbupartII + @ddpartII Scenario: 76 Full backup and redirected restore with -T and --truncate Given the old timestamps are read from json And the database "bkdb76-2" does not exist @@ -745,6 +777,7 @@ Feature: Validate command line arguments And check that there is a "ao" table "public.ao_index_table" in "bkdb76-2" with same data from "bkdb76" @nbupartII + @ddpartII Scenario: 77 Incremental redirected restore with table filter Given the old timestamps are read from json When the user runs gpdbrestore -e with the stored timestamp and options "-T public.ao_table -T public.co_table --redirect=bkdb77-2" @@ -752,6 +785,7 @@ Feature: Validate command line arguments And verify that exactly "2" tables in "bkdb77-2" have been restored from "bkdb77" @nbupartII + @ddpartII Scenario: 78 Full Backup and Redirected Restore with --prefix option Given the old timestamps are read from json When the user runs gpdbrestore -e with the stored timestamp and options "--prefix=foo --redirect=bkdb78-2" @@ -760,6 +794,7 @@ Feature: Validate command line arguments And check that there is a "ao" table "public.ao_part_table" in "bkdb78-2" with same data from "bkdb78" @nbupartII + @ddpartII Scenario: 79 Full Backup and Redirected Restore with --prefix option for multiple databases Given the old timestamps are read from json When the user runs gpdbrestore -e with the stored timestamp and options "--prefix=foo --redirect=bkdb79-3" @@ -851,6 +886,7 @@ Feature: Validate 
command line arguments And verify that the tuple count of all appendonly tables are consistent in "bkdb89" @nbupartII + @ddpartII Scenario: 90 Writable Report/Status Directory Full Backup and Restore without --report-status-dir option Given the old timestamps are read from json When the user runs gpdbrestore -e with the stored timestamp @@ -865,6 +901,7 @@ Feature: Validate command line arguments And there are no status files in "segment_data_directory" @nbupartII + @ddpartII Scenario: 91 Writable Report/Status Directory Full Backup and Restore with --report-status-dir option Given the old timestamps are read from json When the user runs gpdbrestore -e with the stored timestamp and options "--report-status-dir=/tmp" @@ -904,6 +941,7 @@ Feature: Validate command line arguments And the user runs command "chmod -R 777 /tmp/custom_timestamps/db_dumps" @nbupartII + @ddpartII Scenario: 94 Filtered Full Backup with Partition Table Given the old timestamps are read from json When the user runs gpdbrestore -e with the stored timestamp and options "-T public.ao_part_table" @@ -913,6 +951,7 @@ Feature: Validate command line arguments And verify that the data of "9" tables in "bkdb94" is validated after restore @nbupartIII + @ddpartIII Scenario: 95 Filtered Incremental Backup with Partition Table Given the old timestamps are read from json When the user runs gpdbrestore -e with the stored timestamp and options "-T public.ao_part_table" @@ -922,6 +961,7 @@ Feature: Validate command line arguments And verify that the data of "9" tables in "bkdb95" is validated after restore @nbupartIII + @ddpartIII Scenario: 96 gpdbrestore runs ANALYZE on restored table only Given the old timestamps are read from json And the backup test is initialized with database "bkdb96" @@ -933,6 +973,7 @@ Feature: Validate command line arguments And verify that the table "public.heap_table" in database "bkdb96" is not analyzed @nbupartIII + @ddpartIII Scenario: 97 Full Backup with multiple -S option and Restore Given the old timestamps are read from json When the user runs gpdbrestore -e with the stored timestamp @@ -942,6 +983,7 @@ Feature: Validate command line arguments And verify that there is a "ao" table "schema_ao.ao_part_table" in "bkdb97" with data @nbupartIII + @ddpartIII Scenario: 98 Full Backup with option -S and Restore Given the old timestamps are read from json When the user runs gpdbrestore -e with the stored timestamp @@ -950,6 +992,7 @@ Feature: Validate command line arguments And verify that there is a "ao" table "schema_ao.ao_part_table" in "bkdb98" with data @nbupartIII + @ddpartIII Scenario: 99 Full Backup with option -s and Restore Given the old timestamps are read from json When the user runs gpdbrestore -e with the stored timestamp @@ -958,6 +1001,7 @@ Feature: Validate command line arguments And verify that there is no table "schema_ao.ao_part_table" in "bkdb99" @nbupartIII + @ddpartIII Scenario: 100 Full Backup with option --exclude-schema-file and Restore Given the old timestamps are read from json When the user runs gpdbrestore -e with the stored timestamp @@ -967,6 +1011,7 @@ Feature: Validate command line arguments And verify that there is no table "testschema.heap_table" in "bkdb100" @nbupartIII + @ddpartIII Scenario: 101 Full Backup with option --schema-file and Restore Given the old timestamps are read from json When the user runs gpdbrestore -e with the stored timestamp @@ -976,6 +1021,7 @@ Feature: Validate command line arguments And verify that there is no table "testschema.heap_table" in 
"bkdb101" @nbupartIII + @ddpartIII Scenario: 106 Full Backup and Restore with option --change-schema Given the old timestamps are read from json And the backup test is initialized with database "bkdb106" @@ -1013,6 +1059,7 @@ Feature: Validate command line arguments And verify that the restored table "public.ao_part_table" in database "bkdb108" is analyzed @nbupartIII + @ddpartIII Scenario: 109 Backup and restore with statistics and table filters Given the old timestamps are read from json When the user runs gpdbrestore -e with the stored timestamp and options "-T public.heap_index_table --noanalyze" @@ -1030,6 +1077,7 @@ Feature: Validate command line arguments Then gpdbrestore should not print "Issue with 'ANALYZE' of restored table 'public.heap_table2' in 'bkdb110' database" to stdout @nbupartIII + @ddpartIII Scenario: 111 Full Backup with option --schema-file with prefix option and Restore Given the old timestamps are read from json When the user runs gpdbrestore -e with the stored timestamp and options "--prefix=foo" @@ -1050,6 +1098,7 @@ Feature: Validate command line arguments And verify that there are "4380" tuples in "bkdb112" for table "public.ao_table" @nbupartIII + @ddpartIII Scenario: 113 Simple Full Backup with AO/CO statistics w/ filter schema Given the old timestamps are read from json When the user runs gpdbrestore -e with the stored timestamp and options "--noaostats" @@ -1085,6 +1134,7 @@ Feature: Validate command line arguments And verify that there are "0" tuples in "bkdb113" for table "schema_ao.ao_part_table" @nbupartIII + @ddpartIII Scenario: 114 Restore with --redirect option should not rely on existance of dumped database Given the old timestamps are read from json When the user runs gpdbrestore -e with the stored timestamp and options "--redirect=bkdb114" @@ -1109,6 +1159,7 @@ Feature: Validate command line arguments And verify that there are "2190" tuples in "bkdb116" for table "public.foo4" @nbupartIII + @ddpartIII Scenario: 117 Schema level restore with gpdbrestore -S option for views, sequences, and functions Given the old timestamps are read from json When the user runs gpdbrestore -e with the stored timestamp and options "-S s1" @@ -1146,6 +1197,7 @@ Feature: Validate command line arguments And the user runs command "dropdb "$SP_CHAR_DB"" @nbupartIII + @ddpartIII Scenario: 122 gpcrondump with --exclude-table-file option where table name, schema name and database name contains special character Given the old timestamps are read from json And the backup test is initialized for special characters @@ -1237,6 +1289,7 @@ Feature: Validate command line arguments When the user runs command "dropdb "$SP_CHAR_DB"" @nbupartIII + @ddpartIII Scenario: 131 gpcrondump with --incremental option when table name, schema name and database name contains special character Given the old timestamps are read from json And the backup test is initialized for special characters @@ -1246,6 +1299,7 @@ Feature: Validate command line arguments When the user runs command "dropdb "$SP_CHAR_DB"" @nbupartIII + @ddpartIII Scenario: 132 gpdbrestore with --redirect option with special db name, and all table name, schema name and database name contain special character Given the old timestamps are read from json And the backup test is initialized for special characters @@ -1408,3 +1462,8 @@ Feature: Validate command line arguments And there should be dump files under "/tmp" with prefix "foo" And check that there is a "heap" table "public.heap_table" in "bkdb145-2" with same data from "bkdb145" 
And check that there is a "ao" table "public.ao_part_table" in "bkdb145-2" with same data from "bkdb145" + + @ddonly + @ddboostsetup + Scenario: 146 Cleanup DDBoost dump directories + Given the DDBoost dump directory is deleted diff --git a/gpMgmt/test/behave/mgmt_utils/backups.feature b/gpMgmt/test/behave/mgmt_utils/backups.feature index 938fb44fe8..a17d702863 100644 --- a/gpMgmt/test/behave/mgmt_utils/backups.feature +++ b/gpMgmt/test/behave/mgmt_utils/backups.feature @@ -197,6 +197,7 @@ Feature: Validate command line arguments And gpcrondump should print "Invalid state file format" to stdout @nbupartIII + @ddpartIII Scenario: Increments File Check With Complicated Scenario Given the backup test is initialized with no backup files And database "bkdb2" is dropped and recreated @@ -266,6 +267,7 @@ Feature: Validate command line arguments And gpcrondump should print "Dump type = Incremental" to stdout @nbupartIII + @ddpartIII Scenario: gpcrondump -G with Full timestamp Given the backup test is initialized with no backup files And there is a "heap" table "public.heap_table" in "bkdb" with data @@ -280,7 +282,6 @@ Feature: Validate command line arguments Then gpcrondump should return a return code of 0 And "global" file should be created under " " - @nbupartIII @valgrind Scenario: Valgrind test of gp_dump incremental Given the backup test is initialized with no backup files @@ -330,7 +331,6 @@ Feature: Validate command line arguments Then gpcrondump should return a return code of 0 And the user runs valgrind with "gp_dump_agent --gp-k 11111111111111_-1_1_ --gp-d /tmp --pre-data-schema-only bkdb --incremental --table-file=/tmp/dirty_hack.txt" and options " " - @nbupartIII @valgrind Scenario: Valgrind test of gp_dump_agent full with table file Given the backup test is initialized with no backup files @@ -345,7 +345,6 @@ Feature: Validate command line arguments And the user runs valgrind with "gp_dump_agent --gp-k 11111111111111_-1_1_ --gp-d /tmp --pre-data-schema-only bkdb --table-file=/tmp/dirty_hack.txt" and options " " @valgrind - @nbupartIII Scenario: Valgrind test of gp_dump_agent incremental Given the backup test is initialized with no backup files And there is a "heap" table "public.heap_table" in "bkdb" with data @@ -428,6 +427,7 @@ Feature: Validate command line arguments And the dump directory for the stored timestamp should exist @nbupartIII + @ddpartIII Scenario: Verify the gpcrondump -g option works with full backup Given the backup test is initialized with no backup files When the user runs "gpcrondump -a -x bkdb -g" @@ -436,6 +436,7 @@ Feature: Validate command line arguments And config files should be backed up on all segments @nbupartIII + @ddpartIII Scenario: Verify the gpcrondump -g option works with incremental backup Given the backup test is initialized with no backup files When the user runs "gpcrondump -a -x bkdb" @@ -446,6 +447,7 @@ Feature: Validate command line arguments And config files should be backed up on all segments @nbupartIII + @ddpartIII Scenario: Verify the gpcrondump history table works by default with full and incremental backups Given the backup test is initialized with no backup files And schema "testschema" exists in "bkdb" @@ -464,6 +466,7 @@ Feature: Validate command line arguments And verify that the table "gpcrondump_history" in "bkdb" has dump info for the stored timestamp @nbupartIII + @ddpartIII Scenario: Verify the gpcrondump -H option should not create history table Given the backup test is initialized with no backup files And schema 
"testschema" exists in "bkdb" @@ -479,6 +482,7 @@ Feature: Validate command line arguments And gpcrondump should print "-H option cannot be selected with -h option" to stdout @nbupartIII + @ddpartIII Scenario: Config files have the same timestamp as the backup set Given the backup test is initialized with no backup files And there is a "heap" table "public.heap_table" in "bkdb" with data @@ -486,9 +490,10 @@ Feature: Validate command line arguments When the user runs "gpcrondump -a -x bkdb -g" And the timestamp from gpcrondump is stored Then gpcrondump should return a return code of 0 - And verify that the config files are backed up with the stored timestamp + And config files should be backed up on all segments @nbupartIII + @ddpartIII Scenario Outline: Incremental Backup With column-inserts, inserts and oids options Given the backup test is initialized with no backup files When the user runs "gpcrondump -a --incremental -x bkdb " @@ -524,6 +529,7 @@ Feature: Validate command line arguments Then gpcrondump should return a return code of 0 @nbupartIII + @ddpartIII Scenario: Full Backup with option -t and non-existant table Given the backup test is initialized with no backup files And there is a "heap" table "public.heap_table" in "bkdb" with data @@ -693,17 +699,74 @@ Feature: Validate command line arguments Then verify the metadata dump file does not contain "ALTER TABLE * OWNER TO" @nbupartIII - Scenario: gpcrondump with -u, -G, and -g + Scenario: gpcrondump with -G and -g Given the backup test is initialized with no backup files And there is a "heap" table "public.heap_table" in "bkdb" with data And there is a "ao" table "public.ao_index_table" in "bkdb" with data - When the user runs "gpcrondump -a -x bkdb -G -g -u /tmp" + When the user runs "gpcrondump -a -x bkdb -G -g" And the timestamp from gpcrondump is stored Then gpcrondump should return a return code of 0 - And "global" file should be created under "/tmp" - And config files should be backed up on all segments in directory "/tmp" + And "global" file should be created under " " + And config files should be backed up on all segments + + Scenario: gpcrondump with -k (vacuum after backup) + Given the backup test is initialized with no backup files + And the user runs "psql -c 'vacuum;' bkdb" + Then store the vacuum timestamp for verification in database "bkdb" + When the user runs "gpcrondump -a -x bkdb -k" + Then gpcrondump should return a return code of 0 + Then gpcrondump should print "Commencing post-dump vacuum" to stdout + And gpcrondump should print "Vacuum of bkdb completed without error" to stdout + And verify that vacuum has been run in database "bkdb" + + Scenario: gpcrondump with -f when not enough disk space + Given the backup test is initialized with no backup files + When the user runs "gpcrondump -a -x bkdb -f 100" + Then gpcrondump should return a return code of 2 + And gpcrondump should print "segment\(s\) failed disk space checks" to stdout + + Scenario: gpcrondump with -B with 1 process + Given the backup test is initialized with no backup files + When the user runs "gpcrondump -a -x bkdb -B 1 -v" + Then gpcrondump should return a return code of 0 + And gpcrondump should not print "\[worker1\] got a halt cmd" to stdout + + Scenario: gpcrondump with -d with invalid master data directory + Given the backup test is initialized with no backup files + When the user runs "gpcrondump -a -x bkdb -d /tmp" + Then gpcrondump should return a return code of 2 + And gpcrondump should print "gpcrondump failed.* No such file or 
directory" to stdout + + Scenario: gpcrondump with -l to log to /tmp directory + Given the backup test is initialized with no backup files + When the user runs "gpcrondump -a -x no_exist -l /tmp" + Then gpcrondump should return a return code of 2 + And the "gpcrondump" log file should exist under "/tmp" + + @ddpartIII + @ddonly + Scenario: gpcrondump with -c on Data Domain + Given the backup test is initialized with no backup files + When the user runs "gpcrondump -a -x bkdb -K 20150101010101" + Then gpcrondump should return a return code of 0 + And the full backup timestamp from gpcrondump is stored + When the user runs "gpcrondump -a -x bkdb -c" + Then gpcrondump should return a return code of 0 + Then no dump files should be present on the data domain server + + @ddpartIII + @ddonly + Scenario: gpcrondump with -o on Data Domain + Given the backup test is initialized with no backup files + When the user runs "gpcrondump -a -x bkdb -K 20150101010101" + Then gpcrondump should return a return code of 0 + And the full backup timestamp from gpcrondump is stored + When the user runs "gpcrondump -a -x bkdb -o" + Then gpcrondump should return a return code of 0 + Then no dump files should be present on the data domain server @nbupartIII + @ddpartIII Scenario: Out of Sync timestamp Given the backup test is initialized with no backup files And there is a "ao" table "public.ao_table" in "bkdb" with data @@ -715,5 +778,11 @@ Feature: Validate command line arguments And gpcrondump should print "There is a future dated backup on the system preventing new backups" to stdout @nbupartIII + @ddpartIII Scenario: The test suite is torn down Given the backup test is initialized with no backup files + + @ddonly + @ddboostsetup + Scenario: Cleanup DDBoost dump directories + Given the DDBoost dump directory is deleted diff --git a/gpMgmt/test/behave/mgmt_utils/restores.feature b/gpMgmt/test/behave/mgmt_utils/restores.feature index fef2dc00b9..a85ea18495 100644 --- a/gpMgmt/test/behave/mgmt_utils/restores.feature +++ b/gpMgmt/test/behave/mgmt_utils/restores.feature @@ -32,6 +32,7 @@ Feature: Validate command line arguments Then gpdbrestore should return a return code of 2 And gpdbrestore should print "Name has an invalid character" to stdout + @ddpartIII Scenario: gpdbrestore -b with Full timestamp Given the backup test is initialized with no backup files And there is a "ao" table "public.ao_index_table" in "bkdb" with data @@ -82,3 +83,18 @@ Feature: Validate command line arguments Then gpdbrestore should return a return code of 2 And gpdbrestore should print "-u cannot be used with DDBoost parameters" to stdout + Scenario: gpdbrestore with -d with invalid master data directory + When the user runs "gpdbrestore -a -t 20140101010101 -d /tmp" + Then gpdbrestore should return a return code of 2 + And gpdbrestore should print "gpdbrestore failed.* No such file or directory" to stdout + + Scenario: gpdbrestore with -l to log to /tmp directory + Given the backup test is initialized with no backup files + When the user runs "gpdbrestore -l /tmp" + Then gpdbrestore should return a return code of 2 + And the "gpdbrestore" log file should exist under "/tmp" + + @ddonly + @ddboostsetup + Scenario: Cleanup DDBoost dump directories + Given the DDBoost dump directory is deleted diff --git a/gpMgmt/test/behave/mgmt_utils/steps/mgmt_utils.py b/gpMgmt/test/behave/mgmt_utils/steps/mgmt_utils.py index c222678b8d..090d3b51c3 100644 --- a/gpMgmt/test/behave/mgmt_utils/steps/mgmt_utils.py +++ 
b/gpMgmt/test/behave/mgmt_utils/steps/mgmt_utils.py @@ -3,6 +3,11 @@ import getpass import glob import gzip import json +import yaml +try: + import pexpect +except: + print "The pexpect module could not be imported." import os import platform @@ -30,7 +35,6 @@ from test.behave_utils.gpfdist_utils.gpfdist_mgmt import Gpfdist from test.behave_utils.utils import * from test.behave_utils.PgHba import PgHba, Entry from gppylib.commands.base import Command, REMOTE -import yaml labels_json = '/tmp/old_to_new_timestamp_labels.json' timestamp_json = '/tmp/old_to_new_timestamps.json' @@ -390,11 +394,10 @@ def impl(context, dbname): @when('the user runs "{command}"') @then('the user runs "{command}"') def impl(context, command): - if use_netbackup(): - if 'gpcrondump' in command: - command = append_storage_config_to_backup_command(context, command) - elif 'gpdbrestore' in command: - command = append_storage_config_to_restore_command(context, command) + if 'gpcrondump' in command: + command = append_storage_config_to_backup_command(context, command) + elif 'gpdbrestore' in command: + command = append_storage_config_to_restore_command(context, command) run_gpcommand(context, command) @@ -538,20 +541,17 @@ def impl(context, command, options): ts = context.backup_timestamp bnr_tool = command.split()[0].strip() if bnr_tool == 'gp_dump': - command_str = append_storage_config_to_backup_command(context, command) + command_str = command elif bnr_tool == 'gp_dump_agent': command_str = command + ' -p %s' % port - command_str = append_storage_config_to_backup_command(context, command_str) elif bnr_tool == 'gp_restore': command_str = "%s %s --gp-k %s --gp-d db_dumps/%s --gp-r db_dumps/%s" % ( command, options, context.backup_timestamp, context.backup_timestamp[0:8], context.backup_timestamp[0:8]) - command_str = append_storage_config_to_restore_command(context, command_str) elif bnr_tool == 'gp_restore_agent': command_str = "%s %s --gp-k %s --gp-d db_dumps/%s -p %s -U %s --target-host localhost " \ "--target-port %s db_dumps/%s/gp_dump_-1_1_%s_post_data.gz" % ( command, options, ts, ts[0:8], port, user, port, ts[0:8], ts) - command_str = append_storage_config_to_restore_command(context, command_str) - + command_str = append_storage_config_to_restore_command(context, command_str) run_valgrind_command(context, command_str, "valgrind_suppression.txt") @@ -880,9 +880,10 @@ def impl(context, dirname): @then('verify that the incremental file has the stored timestamp') def impl(context): + dump_dir = get_dump_dir(context, "") inc_file_name = 'gp_dump_%s_increments' % context.full_backup_timestamp subdirectory = context.full_backup_timestamp[0:8] - full_path = os.path.join(master_data_dir, 'db_dumps', subdirectory, inc_file_name) + full_path = os.path.join(dump_dir, subdirectory, inc_file_name) if not os.path.isfile(full_path): raise Exception("Can not find increments file: %s" % full_path) @@ -897,9 +898,10 @@ def impl(context): def check_increments_file_for_list(context, location): + dump_dir = get_dump_dir(context, location) inc_file_name = 'gp_dump_%s_increments' % context.full_backup_timestamp subdirectory = context.full_backup_timestamp[0:8] - full_path = os.path.join(location, 'db_dumps', subdirectory, inc_file_name) + full_path = os.path.join(dump_dir, subdirectory, inc_file_name) if not os.path.isfile(full_path): raise Exception("Can not find increments file: %s" % full_path) @@ -939,7 +941,7 @@ def impl(context): def impl(context): context.inc_backup_timestamps = sorted(context.inc_backup_timestamps) latest_ts 
= context.inc_backup_timestamps[-1] - plan_file_dir = os.path.join(master_data_dir, 'db_dumps', latest_ts[0:8]) + plan_file_dir = get_dump_dir(context, master_data_dir) + '/' + latest_ts[0:8] plan_file_count = len(glob.glob('/%s/*%s*_plan' % (plan_file_dir, latest_ts))) if plan_file_count != 1: raise Exception('Expected only one plan file, found %s' % plan_file_count) @@ -961,17 +963,26 @@ def impl(context, subdir): raise Exception('Timestamp not found %s' % stdout) -@when('the state files are generated under "{dir}" for stored "{backup_type}" timestamp') -@then('the state files are generated under "{dir}" for stored "{backup_type}" timestamp') -def impl(context, dir, backup_type): - dump_dir = dir if len(dir.strip()) != 0 else master_data_dir +def get_dump_dir(context, directory): + dump_dir = directory.strip() if len(directory.strip()) != 0 else master_data_dir + if use_ddboost(): + dump_dir = os.path.join(dump_dir, context._root['ddboost_backupdir']) + else: + dump_dir = os.path.join(dump_dir, 'db_dumps') + return dump_dir + + +@when('the state files are generated under "{directory}" for stored "{backup_type}" timestamp') +@then('the state files are generated under "{directory}" for stored "{backup_type}" timestamp') +def impl(context, directory, backup_type): + dump_dir = get_dump_dir(context, directory) if backup_type == 'full': timestamp = context.full_backup_timestamp else: timestamp = context.backup_timestamp - ao_state_filename = "%s/db_dumps/%s/gp_dump_%s_ao_state_file" % (dump_dir, timestamp[0:8], timestamp) - co_state_filename = "%s/db_dumps/%s/gp_dump_%s_co_state_file" % (dump_dir, timestamp[0:8], timestamp) + ao_state_filename = "%s/%s/gp_dump_%s_ao_state_file" % (dump_dir, timestamp[0:8], timestamp) + co_state_filename = "%s/%s/gp_dump_%s_co_state_file" % (dump_dir, timestamp[0:8], timestamp) if not os.path.exists(ao_state_filename): raise Exception('AO state file %s not generated' % ao_state_filename) @@ -984,12 +995,12 @@ def impl(context, dir, backup_type): @then('the "{file_type}" files are generated under "{dirname}" for stored "{backup_type}" timestamp') def impl(context, file_type, dirname, backup_type): - dump_dir = dirname if len(dirname.strip()) != 0 else master_data_dir + dump_dir = get_dump_dir(context, dirname) if backup_type == 'full': timestamp = context.full_backup_timestamp else: timestamp = context.backup_timestamp - last_operation_filename = "%s/db_dumps/%s/gp_dump_%s_last_operation" % (dump_dir, timestamp[0:8], timestamp) + last_operation_filename = "%s/%s/gp_dump_%s_last_operation" % (dump_dir, timestamp[0:8], timestamp) if not os.path.exists(last_operation_filename): raise Exception('Last operation file %s not generated' % last_operation_filename) @@ -1163,18 +1174,17 @@ def impl(context, table_name, dbname): validate_table_data_on_segments(context, table_name, dbname) -@then('verify that the data of the {file} under "{backup_dir}" in "{dbname}" is validated after restore') -def impl(context, file, dbname, backup_dir): - dump_dir = backup_dir if len(backup_dir.strip()) != 0 else master_data_dir - - if file == 'dirty tables': - dirty_list_filename = '%s/db_dumps/%s/gp_dump_%s_dirty_list' % ( +@then('verify that the data of the {filename} under "{directory}" in "{dbname}" is validated after restore') +def impl(context, filename, dbname, directory): + dump_dir = get_dump_dir(context, directory) + if filename == 'dirty tables': + dirty_list_filename = '%s/%s/gp_dump_%s_dirty_list' % ( dump_dir, context.backup_timestamp[0:8], context.backup_timestamp) - elif 
file == 'table_filter_file': - dirty_list_filename = os.path.join(os.getcwd(), file) + elif filename == 'table_filter_file': + dirty_list_filename = os.path.join(os.getcwd(), filename) if not os.path.exists(dirty_list_filename): - raise Exception('Dirty list file %s does not exist' % dirty_list_filename) + raise Exception('Dirty list filename %s does not exist' % dirty_list_filename) with open(dirty_list_filename) as fd: tables = fd.readlines() @@ -1362,8 +1372,6 @@ def impl(context, table, dbname): def verify_file_contents(context, file_type, file_dir, text_find, should_contain=True): - if len(file_dir.strip()) == 0: - file_dir = master_data_dir if not hasattr(context, "dump_prefix"): context.dump_prefix = '' @@ -1385,12 +1393,13 @@ def verify_file_contents(context, file_type, file_dir, text_find, should_contain elif file_type == 'dump': fn = '%sgp_dump_*_1_%s.gz' % (context.dump_prefix, context.backup_timestamp) + file_dir = get_dump_dir(context, file_dir) subdirectory = context.backup_timestamp[0:8] if file_type == 'pg_dump_log': full_path = os.path.join(file_dir, fn) else: - full_path = glob.glob(os.path.join(file_dir, 'db_dumps', subdirectory, fn))[0] + full_path = glob.glob(os.path.join(file_dir, subdirectory, fn))[0] if not os.path.isfile(full_path): raise Exception("Can not find %s file: %s" % (file_type, full_path)) @@ -1481,8 +1490,8 @@ def impl(context, dbname): raise Exception('Dump file %s not found after gp_dump on host %s' % (dump_file, host)) -@then('"{filetype}" file should not be created under "{dir}"') -def impl(context, filetype, dir): +@then('"{filetype}" file should not be created under "{directory}"') +def impl(context, filetype, directory): if not hasattr(context, 'backup_timestamp'): raise Exception('Unable to find out the %s because backup timestamp has not been stored' % filetype) @@ -1497,8 +1506,8 @@ def impl(context, filetype, dir): else: raise Exception("Unknown filetype '%s' specified" % filetype) - dump_dir = dir if len(dir.strip()) != 0 else master_data_dir - file_path = os.path.join(dump_dir, 'db_dumps', context.backup_timestamp[0:8], filename) + dump_dir = get_dump_dir(context, directory) + file_path = os.path.join(dump_dir, context.backup_timestamp[0:8], filename) if os.path.exists(file_path): raise Exception("File path %s should not exist for filetype '%s'" % (file_path, filetype)) @@ -1610,8 +1619,8 @@ def impl(context, ts, numtables): raise Exception("Timestamp label '%s' not found in restore plan" % ts) -@then('"{filetype}" file is removed under "{dir}"') -def impl(context, filetype, dir): +@then('"{filetype}" file is removed under "{directory}"') +def impl(context, filetype, directory): if not hasattr(context, 'backup_timestamp'): raise Exception('Backup timestamp has not been stored') @@ -1627,17 +1636,16 @@ def impl(context, filetype, dir): filename = 'gp_dump_*_1_%s.gz' % context.backup_timestamp else: raise Exception("Unknown filetype '%s' specified" % filetype) - - dump_dir = dir if len(dir.strip()) != 0 else master_data_dir - file_path = glob.glob(os.path.join(dump_dir, 'db_dumps', context.backup_timestamp[0:8], filename))[0] + dump_dir = get_dump_dir(context, directory) + file_path = glob.glob(os.path.join(dump_dir, context.backup_timestamp[0:8], filename))[0] if os.path.exists(file_path): os.remove(file_path) -@when('"{filetype}" file should be created under "{dir}"') -@then('"{filetype}" file should be created under "{dir}"') -def impl(context, filetype, dir): +@when('"{filetype}" file should be created under "{directory}"') 
+@then('"{filetype}" file should be created under "{directory}"') +def impl(context, filetype, directory): if not hasattr(context, "dump_prefix"): context.dump_prefix = '' if not hasattr(context, 'backup_timestamp'): @@ -1662,13 +1670,11 @@ def impl(context, filetype, dir): else: raise Exception("Unknown filetype '%s' specified" % filetype) - dump_dir = dir.strip() if len(dir.strip()) != 0 else master_data_dir - file_path = glob.glob(os.path.join(dump_dir, 'db_dumps', context.backup_timestamp[0:8], '%s%s' % (context.dump_prefix, filename)))[0] - - if not os.path.exists(file_path): + dump_dir = get_dump_dir(context, directory) + file_path = glob.glob(os.path.join(dump_dir, context.backup_timestamp[0:8], '%s%s' % (context.dump_prefix, filename))) + if len(file_path) == 0 or not os.path.exists(file_path[0]): raise Exception("File path %s does not exist for filetype '%s'" % (file_path, filetype)) - @then('verify there are no "{tmp_file_prefix}" tempfiles') def impl(context, tmp_file_prefix): if tmp_file_prefix is not None and tmp_file_prefix: @@ -1785,7 +1791,7 @@ def impl(context, cname, ctype, defval, tname, dbname): @given('there is a fake timestamp for "{ts}"') def impl(context, ts): - dname = os.path.join(master_data_dir, 'db_dumps', ts[0:8]) + dname = os.path.join(get_dump_dir(context, master_data_dir), ts[0:8]) os.makedirs(dname) contents = """ @@ -1977,26 +1983,26 @@ def impl(context): raise Exception('Expected directory does not exist %s' % dump_dir) -def validate_master_config_backup_files(context, dir=master_data_dir): +def validate_master_config_backup_files(context, directory=master_data_dir): if not hasattr(context, "dump_prefix"): context.dump_prefix = '' - master_dump_dir = os.path.join(dir, 'db_dumps', context.backup_timestamp[0:8]) - dump_files = os.listdir(master_dump_dir) + dump_dir = os.path.join(get_dump_dir(context, directory), context.backup_timestamp[0:8]) + dump_files = os.listdir(dump_dir) for df in dump_files: if df.startswith('%sgp_master_config_files' % context.dump_prefix) and df.endswith('.tar'): return - raise Exception('Config files not backed up on master "%s"' % master_dump_dir) + raise Exception('Config files not backed up on master "%s"' % dump_dir) -def validate_segment_config_backup_files(context, dir=None): +def validate_segment_config_backup_files(context, directory=None): if not hasattr(context, "dump_prefix"): context.dump_prefix = '' gparray = GpArray.initFromCatalog(dbconn.DbURL()) primary_segs = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary()] for ps in primary_segs: - seg_data_dir = dir if dir is not None else ps.getSegmentDataDirectory() - dump_dir = os.path.join(seg_data_dir, 'db_dumps', context.backup_timestamp[0:8]) + seg_data_dir = directory if directory is not None else ps.getSegmentDataDirectory() + dump_dir = os.path.join(get_dump_dir(context, seg_data_dir), context.backup_timestamp[0:8]) dump_files = ListRemoteFilesByPattern(dump_dir, '%sgp_segment_config_files_*_%d_%s.tar' % ( context.dump_prefix, ps.getSegmentDbId(), context.backup_timestamp), @@ -2014,13 +2020,13 @@ def impl(context): validate_segment_config_backup_files(context) -@then('config files should be backed up on all segments in directory "{dir}"') -def impl(context, dir): +@then('config files should be backed up on all segments in directory "{directory}"') +def impl(context, directory): if not hasattr(context, 'backup_timestamp'): raise Exception('Backup timestamp needs to be stored') - validate_master_config_backup_files(context, dir=dir) - 
validate_segment_config_backup_files(context, dir=dir) + validate_master_config_backup_files(context, directory=directory) + validate_segment_config_backup_files(context, directory=directory) @then('verify that the table "{table_name}" in "{dbname}" has dump info for the stored timestamp') @@ -3090,22 +3096,24 @@ def impl(context, directory, prefix): if not hasattr(context, "dump_prefix"): context.dump_prefix = '' dump_prefix = '%s_gp' % prefix.strip() - master_dump_dir = directory if len(directory.strip()) != 0 else master_data_dir + dump_dir = get_dump_dir(context, directory) + segment_dump_files = get_segment_dump_files(context, directory) - for seg, dump_files in segment_dump_files: - segment_dump_dir = directory if len(directory.strip()) != 0 else seg.getSegmentDataDirectory() - if len(dump_files) == 0: - raise Exception('Failed to find dump files on the segment %s under %s/db_dumps/%s' % ( - seg.getSegmentDataDirectory(), segment_dump_dir, context.backup_timestamp[0:8])) - for dump_file in dump_files: - if not dump_file.startswith(dump_prefix): - raise Exception( - 'Dump file %s on the segment %s under %s/db_dumps/%s does not start with required prefix %s' % ( - dump_file, seg.getSegmentDataDirectory(), segment_dump_dir, context.backup_timestamp[0:8], prefix)) + if not use_ddboost(): + for seg, dump_files in segment_dump_files: + segment_dump_dir = directory if len(directory.strip()) != 0 else seg.getSegmentDataDirectory() + if len(dump_files) == 0: + raise Exception('Failed to find dump files on the segment %s under %s/db_dumps/%s' % ( + seg.getSegmentDataDirectory(), segment_dump_dir, context.backup_timestamp[0:8])) + for dump_file in dump_files: + if not dump_file.startswith(dump_prefix): + raise Exception( + 'Dump file %s on the segment %s under %s/db_dumps/%s does not start with required prefix %s' % ( + dump_file, seg.getSegmentDataDirectory(), segment_dump_dir, context.backup_timestamp[0:8], prefix)) cmd = Command('check dump files', - 'ls %s/db_dumps/%s/*%s*' % (master_dump_dir, context.backup_timestamp[0:8], context.backup_timestamp)) + 'ls %s/%s/*%s*' % (dump_dir, context.backup_timestamp[0:8], context.backup_timestamp)) cmd.run(validateAfter=True) results = cmd.get_stdout().split('\n') @@ -4373,6 +4381,7 @@ def impl(context): And database "bkdb" is dropped and recreated And there are no backup files And the backup files in "/tmp" are deleted + And the DDBoost dump directory is deleted ''') @@ -4897,44 +4906,58 @@ def use_netbackup(): else: return False +def use_ddboost(): + if os.getenv('DDBOOST'): + return True + else: + return False + def append_storage_config_to_backup_command(context, command): if use_netbackup(): command += " --netbackup-service-host " + context._root['netbackup_service_host'] + " --netbackup-policy " + context._root['netbackup_policy'] + " --netbackup-schedule " + context._root['netbackup_schedule'] + elif use_ddboost(): + command += " --ddboost" return command def append_storage_config_to_restore_command(context, command): if use_netbackup(): command += " --netbackup-service-host " + context._root['netbackup_service_host'] + elif use_ddboost(): + command += " --ddboost" return command -@given('the netbackup storage params have been parsed') -def impl(context): - NETBACKUPDICT = defaultdict(dict) - NETBACKUPDICT['NETBACKUPINFO'] = parse_netbackup_params() - context._root['netbackup_service_host'] = NETBACKUPDICT['NETBACKUPINFO']['NETBACKUP_PARAMS']['NETBACKUP_SERVICE_HOST'] - context._root['netbackup_policy'] = 
NETBACKUPDICT['NETBACKUPINFO']['NETBACKUP_PARAMS']['NETBACKUP_POLICY']
-    context._root['netbackup_schedule'] = NETBACKUPDICT['NETBACKUPINFO']['NETBACKUP_PARAMS']['NETBACKUP_SCHEDULE']
-
-def parse_netbackup_params():
-    current_path = os.path.realpath(__file__)
-    current_dir = os.path.dirname(current_path)
-    netbackup_yaml_file_path = os.path.join(current_dir, 'data/netbackup_behave_config.yaml')
-    try:
-        nbufile = open(netbackup_yaml_file_path, 'r')
-    except IOError,e:
-        raise Exception("Unable to open file %s: %s" % (netbackup_yaml_file_path, e))
-    try:
-        nbudata = yaml.load(nbufile.read())
-    except yaml.YAMLError, exc:
-        raise Exception("Error reading file %s: %s" % (netbackup_yaml_file_path, exc))
-    finally:
-        nbufile.close()
-
-    if len(nbudata) == 0:
-        raise Exception("The load of the config file %s failed.\
-            No configuration information to continue testing operation." % netbackup_yaml_file_path)
-    else:
-        return nbudata
+def parse_config_params():
+    if use_netbackup():
+        current_path = os.path.realpath(__file__)
+        current_dir = os.path.dirname(current_path)
+        netbackup_yaml_file_path = os.path.join(current_dir, 'data/netbackup_behave_config.yaml')
+        config_yaml = read_config_yaml(netbackup_yaml_file_path)
+    elif use_ddboost():
+        mdd = os.getenv('MASTER_DATA_DIRECTORY')
+        ddboost_yaml_file_path = os.path.join(mdd, 'ddboost_config.yml')
+        config_yaml = read_config_yaml(ddboost_yaml_file_path)
+    return config_yaml
+
+def ddboost_config_setup(context, storage_unit=None):
+    # Clear out any existing DDBoost configuration before registering the
+    # host, user, and backup directory read from the test configuration.
+    cmd_remove_config = "gpcrondump --ddboost-config-remove"
+    run_command(context, cmd_remove_config)
+
+    cmd_config = "gpcrondump --ddboost-host %s --ddboost-user %s --ddboost-backupdir %s" % \
+                            (context._root['ddboost_host'], \
+                             context._root['ddboost_user'], \
+                             context._root['ddboost_backupdir'])
+
+    if storage_unit:
+        cmd_config += " --ddboost-storage-unit %s" % storage_unit
+
+    # gpcrondump prompts for the DDBoost password, so answer it via pexpect.
+    local = pexpect.spawn(cmd_config)
+    local.expect('Password: ')
+    local.sendline(context._root['ddboost_password'])
+    local.expect(pexpect.EOF)
+    local.close()
 
 def _copy_nbu_lib_files(context, ver, gphome):
     ver = ver.replace('.', '')
@@ -4948,9 +4971,88 @@ def _copy_nbu_lib_files(context, ver, gphome):
                       remoteHost=host)
         cmd.run(validateAfter=True)
 
+def read_config_yaml(yaml_file):
+    """ Reads in a yaml file. """
+
+    try:
+        cfgfile = open(yaml_file, 'r')
+    except IOError as e:
+        raise Exception("Unable to open file %s: %s" % (yaml_file, e))
+
+    try:
+        cfgyamldata = yaml.load(cfgfile.read())
+    except yaml.YAMLError as exc:
+        raise Exception("Error reading file %s: %s" % (yaml_file, exc))
+    finally:
+        cfgfile.close()
+
+    if len(cfgyamldata) == 0:
+        raise Exception("The load of the config file %s failed.\
+            No configuration information to continue testing operation."
% yaml_file) + else: + return cfgyamldata + @given('the test suite is initialized for Netbackup "{ver}"') def impl(context, ver): gphome = os.environ.get('GPHOME') _copy_nbu_lib_files(context=context, ver=ver, gphome=gphome) os.environ["NETBACKUP"] = "TRUE" + NETBACKUPDICT = defaultdict(dict) + NETBACKUPDICT['NETBACKUPINFO'] = parse_config_params() + context._root['netbackup_service_host'] = NETBACKUPDICT['NETBACKUPINFO']['NETBACKUP_PARAMS']['NETBACKUP_SERVICE_HOST'] + context._root['netbackup_policy'] = NETBACKUPDICT['NETBACKUPINFO']['NETBACKUP_PARAMS']['NETBACKUP_POLICY'] + context._root['netbackup_schedule'] = NETBACKUPDICT['NETBACKUPINFO']['NETBACKUP_PARAMS']['NETBACKUP_SCHEDULE'] + +@given('the test suite is initialized for DDBoost') +def impl(context): + os.environ["DDBOOST"] = "TRUE" + DDBOOSTDICT = defaultdict(dict) + DDBOOSTDICT['DDBOOSTINFO'] = parse_config_params() + context._root['ddboost_host'] = DDBOOSTDICT['DDBOOSTINFO']['DDBOOST_HOST'] + context._root['ddboost_user'] = DDBOOSTDICT['DDBOOSTINFO']['DDBOOST_USER'] + context._root['ddboost_password'] = DDBOOSTDICT['DDBOOSTINFO']['DDBOOST_PASSWORD'] + if 'ddboost_backupdir' not in context._root: + directory = os.getenv('PULSE_PROJECT', default='GPDB') + os.getenv('PULSE_BUILD_VERSION', default='') + os.getenv('PULSE_STAGE', default='') + '_DIR' + context._root['ddboost_backupdir'] = directory + ddboost_config_setup(context, storage_unit="GPDB") + +@given('the DDBoost dump directory is deleted') +def impl(context): + if use_ddboost(): + cmd_del_dir = "gpddboost --del-dir=%s" % context._root['ddboost_backupdir'] + run_command(context, cmd_del_dir) + +@then('gpcrondump should print the correct disk space check message') +def impl(context): + if use_ddboost(): + check_stdout_msg(context, "Bypassing disk space checks due to DDBoost parameters") + else: + check_stdout_msg(context, "Validating disk space") + +@then('store the vacuum timestamp for verification in database "{dbname}"') +def impl(context, dbname): + sleep(2) + res = execute_sql_singleton(dbname, 'select last_vacuum from pg_stat_all_tables where last_vacuum is not null order by last_vacuum desc limit 1') + context.vacuum_timestamp = res + +@then('verify that vacuum has been run in database "{dbname}"') +def impl(context, dbname): + sleep(2) + res = execute_sql_singleton(dbname, 'select last_vacuum from pg_stat_all_tables where last_vacuum is not null order by last_vacuum desc limit 1') + if res == context.vacuum_timestamp: + raise Exception ("Vacuum did not occur as expected. 
The last_vacuum timestamp %s has not changed" % context.vacuum_timestamp)
+
+@then('the "{utility}" log file should exist under "{directory}"')
+def impl(context, utility, directory):
+    filepath = glob.glob(os.path.join(directory, utility+"*"))
+    if len(filepath) == 0 or not os.path.isfile(filepath[0]):
+        err_str = "No log file matching '%s*' exists under '%s'.\n" % (utility, directory)
+        raise Exception(err_str)
+
+@then('no dump files should be present on the data domain server')
+def impl(context):
+    command = 'gpddboost --listDirectory --dir=%s' % (os.path.join(context._root['ddboost_backupdir'], context.full_backup_timestamp[0:8]))
+    run_gpcommand(context, command)
+    if not context.exception:
+        raise Exception("Directory for date %s still exists" % context.full_backup_timestamp[0:8])
diff --git a/gpMgmt/test/behave_utils/utils.py b/gpMgmt/test/behave_utils/utils.py
index dfd4afa03a..38fdb46576 100644
--- a/gpMgmt/test/behave_utils/utils.py
+++ b/gpMgmt/test/behave_utils/utils.py
@@ -1011,7 +1011,12 @@ def get_backup_dirs_for_hosts(dbname='template1'):
 def cleanup_backup_files(context, dbname, location=None):
     dir_map = get_backup_dirs_for_hosts(dbname)
     for host in dir_map:
-        if location:
+
+        if os.getenv('DDBOOST'):
+            ddboost_dir = context._root['ddboost_backupdir']
+            cmd_str = "ssh %s 'for DIR in %s; do if [ -d \"$DIR/%s\" ]; then rm -rf $DIR/%s $DIR/gpcrondump.pid; fi; done'"
+            cmd = cmd_str % (host, " ".join(dir_map[host]), ddboost_dir, ddboost_dir)
+        elif location:
             cmd_str = "ssh %s 'DIR=%s;if [ -d \"$DIR/db_dumps/\" ]; then rm -rf $DIR/db_dumps $DIR/gpcrondump.pid; fi'"
             cmd = cmd_str % (host, location)
         else:
-- 
GitLab