diff --git a/concourse/pipelines/gpdb_master-generated.yml b/concourse/pipelines/gpdb_master-generated.yml index 80150932c2275e9d29f9c8aff81b30f0b766ecae..098267bba7c6f43cba6ab4ac4f6c192849838af0 100644 --- a/concourse/pipelines/gpdb_master-generated.yml +++ b/concourse/pipelines/gpdb_master-generated.yml @@ -12,7 +12,7 @@ ## file (example: templates/gpdb-tpl.yml) and regenerate the pipeline ## using appropriate tool (example: gen_pipeline.py -t prod). ## ---------------------------------------------------------------------- -## Generated by gen_pipeline.py at: 2020-08-05 10:51:07.537278 +## Generated by gen_pipeline.py at: 2020-08-11 13:55:03.622314 ## Template file: gpdb-tpl.yml ## OS Types: ['centos6', 'centos7', 'ubuntu18.04', 'win'] ## Test Sections: ['ICW', 'Replication', 'ResourceGroups', 'Interconnect', 'CLI', 'UD', 'Extensions'] @@ -202,6 +202,8 @@ groups: - gppkg_ubuntu18 - analyzedb - analyzedb_ubuntu18 + - gpreload + - gpreload_ubuntu18 - gpinitsystem - gpinitsystem_ubuntu18 - gpstate @@ -302,6 +304,8 @@ groups: - gppkg_ubuntu18 - analyzedb - analyzedb_ubuntu18 + - gpreload + - gpreload_ubuntu18 - gpinitsystem - gpinitsystem_ubuntu18 - gpstate @@ -1465,6 +1469,73 @@ jobs: BEHAVE_FLAGS: --tags=analyzedb --tags=~concourse_cluster,demo_cluster +- name: gpreload + plan: + - in_parallel: + steps: + - get: gpdb_src + params: + submodules: + - gpMgmt/bin/pythonSrc/ext + passed: [gate_cli_start] + - get: gpdb7-centos7-test + - in_parallel: + steps: + - do: + - get: bin_gpdb + resource: bin_gpdb_centos7 + passed: [gate_cli_start] + trigger: true + - task: gpreload_demo_cluster_tests + file: gpdb_src/concourse/tasks/behave_gpdb.yml + image: gpdb7-centos7-test + params: + BEHAVE_FLAGS: --tags=gpreload --tags=~concourse_cluster,demo_cluster + TEST_NAME: gpreload + + output_mapping: + coverage: demo-coverage + - task: publish_demo_coverage + image: gpdb7-centos7-test + config: + platform: linux + inputs: + - name: gpdb_src + - name: demo-coverage + run: + path: 
gpdb_src/concourse/scripts/gsutil_sync + args: [ "./demo-coverage/", "gs://((coverage-bucket-name))/((pipeline-name))/" ] + params: + JSON_KEY: ((concourse-gcs-resources-service-account-key)) + caches: + - path: pip-cache-dir + +- name: gpreload_ubuntu18 + plan: + - in_parallel: + steps: + - get: gpdb_src + params: + submodules: + - gpMgmt/bin/pythonSrc/ext + passed: [compile_gpdb_ubuntu18.04] + - get: gpdb7-ubuntu18.04-test + - in_parallel: + steps: + - do: + - get: bin_gpdb_ubuntu18.04 + resource: bin_gpdb_ubuntu18.04 + passed: [compile_gpdb_ubuntu18.04] + trigger: true + - task: gpreload_demo_cluster_tests + file: gpdb_src/concourse/tasks/behave_gpdb.yml + image: gpdb7-ubuntu18.04-test + input_mapping: + bin_gpdb: bin_gpdb_ubuntu18.04 + params: + BEHAVE_FLAGS: --tags=gpreload --tags=~concourse_cluster,demo_cluster + + - name: gpinitsystem plan: - in_parallel: @@ -3054,6 +3125,7 @@ jobs: - gpmovemirrors - gppkg - analyzedb + - gpreload - gpinitsystem - gpstate - replication_slots @@ -3210,6 +3282,8 @@ jobs: - gppkg_ubuntu18 - analyzedb - analyzedb_ubuntu18 + - gpreload + - gpreload_ubuntu18 - gpinitsystem - gpinitsystem_ubuntu18 - gpstate @@ -3256,6 +3330,7 @@ jobs: - gpmovemirrors - gppkg - analyzedb + - gpreload - gpinitsystem - gpstate - replication_slots @@ -3287,6 +3362,7 @@ jobs: - gpmovemirrors_ubuntu18 - gppkg_ubuntu18 - analyzedb_ubuntu18 + - gpreload_ubuntu18 - gpinitsystem_ubuntu18 - gpstate_ubuntu18 - replication_slots_ubuntu18 diff --git a/concourse/pipelines/templates/gpdb-tpl.yml b/concourse/pipelines/templates/gpdb-tpl.yml index 1c45c11c7ac99d3617e63b8a6c0a4417f57ef33c..1259c3b5570e0da407ebe815b794ad427a20f7de 100644 --- a/concourse/pipelines/templates/gpdb-tpl.yml +++ b/concourse/pipelines/templates/gpdb-tpl.yml @@ -6,6 +6,8 @@ 'use_concourse_cluster': true}, {'name': 'analyzedb', 'use_concourse_cluster': false}, + {'name': 'gpreload', + 'use_concourse_cluster': false}, {'name': 'gpinitsystem', 'use_concourse_cluster': false}, {'name': 'gpstate', 
diff --git a/gpMgmt/bin/gppylib/operations/reload.py b/gpMgmt/bin/gppylib/operations/reload.py index 93a64bea8741e21338defdb678281cdebf8a6b22..39d927dd81ecfa77bf74e4dd792f9370abbade15 100644 --- a/gpMgmt/bin/gppylib/operations/reload.py +++ b/gpMgmt/bin/gppylib/operations/reload.py @@ -29,7 +29,9 @@ class GpReload: """SELECT count(*) FROM pg_class, pg_namespace WHERE pg_namespace.nspname = '{schema}' - AND pg_class.relname = '{table}'""".format(schema=schema_name, table=table_name)) + AND pg_class.relname = '{table}' + AND pg_class.relnamespace = pg_namespace.oid + AND pg_class.relkind != 'v'""".format(schema=schema_name, table=table_name)) if not c: raise ExceptionNoStackTraceNeeded('Table {schema}.{table} does not exist' .format(schema=schema_name, table=table_name)) @@ -45,7 +47,9 @@ class GpReload: FROM pg_attribute WHERE attrelid = (SELECT pg_class.oid FROM pg_class, pg_namespace - WHERE pg_class.relname = '{table}' AND pg_namespace.nspname = '{schema}')""" + WHERE pg_class.relname = '{table}' AND pg_namespace.nspname = '{schema}' + AND pg_class.relnamespace = pg_namespace.oid + AND pg_class.relkind != 'v')""" .format(table=table_name, schema=schema_name)) for cols in res.fetchall(): columns.append(cols[0].strip()) @@ -162,8 +166,7 @@ class GpReload: FROM pg_index WHERE indrelid = (SELECT pg_class.oid FROM pg_class, pg_namespace - WHERE pg_class.relname='{table}' AND pg_namespace.nspname='{schema}') - """.format(table=table_name, schema=schema_name)) + WHERE pg_class.relname='{table}' AND pg_namespace.nspname='{schema}' AND pg_class.relnamespace = pg_namespace.oid)""".format(table=table_name, schema=schema_name)) if c != 0: if self.interactive: return ask_yesno(None, diff --git a/gpMgmt/test/behave/mgmt_utils/environment.py b/gpMgmt/test/behave/mgmt_utils/environment.py index 08ed9425f99adfc924566646b0a33b3d2b204c48..dfad3c66b69c18a4b0dad0eee9f095744dfba9b4 100644 --- a/gpMgmt/test/behave/mgmt_utils/environment.py +++ 
b/gpMgmt/test/behave/mgmt_utils/environment.py @@ -45,6 +45,13 @@ def before_feature(context, feature): And there is a hard coded ao partition table "sales" with 4 child partitions in schema "public" """) + if 'gpreload' in feature.tags: + start_database_if_not_started(context) + drop_database_if_exists(context, 'gpreload_db') + create_database(context, 'gpreload_db') + context.conn = dbconn.connect(dbconn.DbURL(dbname='gpreload_db'), unsetSearchPath=False) + context.dbname = 'gpreload_db' + if 'minirepro' in feature.tags: start_database_if_not_started(context) minirepro_db = 'minireprodb' @@ -71,6 +78,8 @@ def before_feature(context, feature): def after_feature(context, feature): if 'analyzedb' in feature.tags: context.conn.close() + if 'gpreload' in feature.tags: + context.conn.close() if 'minirepro' in feature.tags: context.conn.close() if 'gpconfig' in feature.tags: diff --git a/gpMgmt/test/behave/mgmt_utils/gpreload.feature b/gpMgmt/test/behave/mgmt_utils/gpreload.feature new file mode 100644 index 0000000000000000000000000000000000000000..dcd4ef59cb1d0f91f758469c89f6bafd6bec3b05 --- /dev/null +++ b/gpMgmt/test/behave/mgmt_utils/gpreload.feature @@ -0,0 +1,11 @@ +@gpreload +Feature: gpreload behave tests + + Scenario: Handle case where a table (public.t1) and view (gpreload_schema.t1) have the same name + Given schema "gpreload_schema" exists in "gpreload_db" + And there is a regular "heap" table "t1" with column name list "a,b" and column type list "int,int" in schema "public" + And some data is inserted into table "t1" in schema "public" with column type list "int,int" + And a view "gpreload_schema.t1" exists on table "t1" + When the user runs command "echo 'public.t1: a, b' > gpreload_config_file" + And the user runs "gpreload -d gpreload_db -t gpreload_config_file" + Then gpreload should return a return code of 0 diff --git a/gpMgmt/test/behave/mgmt_utils/steps/analyzedb_mgmt_utils.py b/gpMgmt/test/behave/mgmt_utils/steps/analyzedb_mgmt_utils.py index 
7b8c434faf0ed32d3c9b79407f9476f78d81c350..e83b083a036ea8a243fa8bab1f3941b3ab3caa19 100644 --- a/gpMgmt/test/behave/mgmt_utils/steps/analyzedb_mgmt_utils.py +++ b/gpMgmt/test/behave/mgmt_utils/steps/analyzedb_mgmt_utils.py @@ -34,6 +34,7 @@ DEFAULT PARTITION default_dates); """ + @given('there is a regular "{storage_type}" table "{tablename}" with column name list "{col_name_list}" and column type list "{col_type_list}" in schema "{schemaname}"') def impl(context, storage_type, tablename, col_name_list, col_type_list, schemaname): schemaname_no_quote = schemaname @@ -85,7 +86,13 @@ def impl(context, number, dbname): @given('a view "{view_name}" exists on table "{table_name}" in schema "{schema_name}"') def impl(context, view_name, table_name, schema_name): with closing(dbconn.connect(dbconn.DbURL(dbname=context.dbname))) as conn: - create_view_on_table(conn, schema_name, table_name, view_name) + create_view_on_table_in_schema(conn, schema_name, table_name, view_name) + + +@given('a view "{view_name}" exists on table "{table_name}"') +def impl(context, view_name, table_name): + with closing(dbconn.connect(dbconn.DbURL(dbname=context.dbname))) as conn: + create_view_on_table(conn, view_name, table_name) @given('"{qualified_table}" appears in the latest state files') @@ -386,6 +393,7 @@ def create_table_with_column_list(conn, storage_type, schemaname, tablename, col query = 'CREATE TABLE %s.%s %s %s DISTRIBUTED RANDOMLY' % (schemaname, tablename, col_list, storage_str) dbconn.execSQL(conn, query) + conn.commit() def insert_data_into_table(conn, schemaname, tablename, col_type_list, num_rows="100"): @@ -393,6 +401,7 @@ def insert_data_into_table(conn, schemaname, tablename, col_type_list, num_rows= col_str = ','.join(["(random()*i)::%s" % x for x in col_type_list]) query = "INSERT INTO " + schemaname + '.' 
+ tablename + " SELECT " + col_str + " FROM generate_series(1," + num_rows + ") i" dbconn.execSQL(conn, query) + conn.commit() def perform_ddl_on_table(conn, schemaname, tablename): @@ -402,7 +411,15 @@ def perform_ddl_on_table(conn, schemaname, tablename): dbconn.execSQL(conn, query) -def create_view_on_table(conn, schemaname, tablename, viewname): +def create_view_on_table_in_schema(conn, schemaname, tablename, viewname): query = "CREATE OR REPLACE VIEW " + schemaname + "." + viewname + \ " AS SELECT * FROM " + schemaname + "." + tablename dbconn.execSQL(conn, query) + conn.commit() + + +def create_view_on_table(conn, viewname, tablename): + query = "CREATE OR REPLACE VIEW " + viewname + \ + " AS SELECT * FROM " + tablename + dbconn.execSQL(conn, query) + conn.commit()