Commit c47ad451 authored by Heikki Linnakangas

Remove silly TINC tests.

The main ExplainAnalyzeTestCase tests in here were running standard
TPC-H-like test queries, and checking that the EXPLAIN ANALYZE output
contains a "Memory: " line for every plan node. That seems a bit excessive,
because looking at how those lines are printed, there is no reason to
believe that you might have a Memory line for some nodes but not others.
I don't think we need to test for that.

In order to still have minimal coverage for the explain_memory_verbosity
GUC, add a small test case to the main regression suite for that.

The mpp20785 test was testing for an old bug where you got an out of
memory error with this query. However, the test was broken, because the
test schema was nowhere to be found, so it simply resulted in a "schema not
found" error. Perhaps we could put it back if we could find the original
schema somewhere, but it's useless as it is.
Parent commit: 6fb8aebb
-- NOTE(review): this span belongs to the recorded expected-output (.out) file;
-- the statements below are echoed verbatim by the regression harness, so the
-- code is kept byte-identical and only commentary is added.
-- Helper function, to return the EXPLAIN ANALYZE output of a query as a normal
-- result set, so that you can manipulate it further.
-- Each line of the EXPLAIN ANALYZE report becomes one text row, so the caller
-- can filter with ordinary predicates (e.g. WHERE et LIKE '%Memory: %').
create or replace function get_explain_output(explain_query text) returns setof text as
$$
declare
-- holds one line of the EXPLAIN ANALYZE report at a time
explainrow text;
begin
-- run the caller's query under EXPLAIN ANALYZE and stream each line back
for explainrow in execute 'EXPLAIN ANALYZE ' || explain_query
loop
return next explainrow;
end loop;
end;
$$ language plpgsql;
--
-- Test explain_memory_verbosity option
--
CREATE TABLE explaintest (id int4);
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'id' as the Greenplum Database data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
INSERT INTO explaintest SELECT generate_series(1, 10);
EXPLAIN ANALYZE SELECT * FROM explaintest;
QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------------
Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..3.10 rows=10 width=4)
Rows out: 10 rows at destination with 0.332 ms to first row, 0.345 ms to end, start offset by 0.283 ms.
-> Seq Scan on explaintest (cost=0.00..3.10 rows=4 width=4)
Rows out: Avg 3.3 rows x 3 workers. Max 5 rows (seg1) with 0.015 ms to first row, 0.018 ms to end, start offset by 0.543 ms.
Slice statistics:
(slice0) Executor memory: 318K bytes.
(slice1) Executor memory: 50K bytes avg x 3 workers, 50K bytes max (seg0).
Statement statistics:
Memory used: 128000K bytes
Total runtime: 0.998 ms
(10 rows)
set explain_memory_verbosity='summary';
-- The plan should consist of a Gather and a Seq Scan, with a
-- "Memory: ..." line on both nodes.
SELECT COUNT(*) from
get_explain_output($$
SELECT * FROM explaintest;
$$) as et
WHERE et like '%Memory: %';
count
-------
2
(1 row)
......@@ -19,7 +19,7 @@ test: variadic_parameters default_parameters function_extensions
test: spi xpath
test: leastsquares opr_sanity_gp decode_expr bitmapscan bitmapscan_ao case_gp limit_gp notin percentile naivebayes join_gp union_gp gpcopy gp_create_table
test: filter gpctas gpdist matrix toast sublink table_functions olap_setup complex opclass_ddl information_schema guc_env_var
test: filter gpctas gpdist matrix toast sublink table_functions olap_setup complex opclass_ddl information_schema guc_env_var gp_explain
test: bitmap_index gp_dump_query_oids analyze
test: indexjoin as_alias regex_gp gpparams with_clause transient_types gp_rules
# dispatch should always run seperately from other cases.
......
-- Helper function, to return the EXPLAIN ANALYZE output of a query as a normal
-- result set, so that you can manipulate it further.
-- Every line of the EXPLAIN ANALYZE report is returned as one text row.
create or replace function get_explain_output(explain_query text) returns setof text as
$$
begin
    -- RETURN QUERY EXECUTE streams all rows produced by the dynamic statement
    -- straight to the caller, replacing the explicit FOR ... RETURN NEXT loop
    -- with the same row order and content.
    return query execute 'EXPLAIN ANALYZE ' || explain_query;
end;
$$ language plpgsql;
--
-- Test explain_memory_verbosity option
--
-- NOTE(review): no DISTRIBUTED BY clause on purpose — the resulting NOTICE is
-- part of the recorded expected output, so it must stay as-is.
CREATE TABLE explaintest (id int4);
INSERT INTO explaintest SELECT generate_series(1, 10);
-- Baseline run with the default verbosity.
EXPLAIN ANALYZE SELECT * FROM explaintest;
set explain_memory_verbosity='summary';
-- The plan should consist of a Gather and a Seq Scan, with a
-- "Memory: ..." line on both nodes.
-- Count the "Memory:" lines using the get_explain_output() helper defined
-- above; the recorded expected output shows a count of 2 (one per plan node).
SELECT COUNT(*) from
get_explain_output($$
SELECT * FROM explaintest;
$$) as et
WHERE et like '%Memory: %';
-- start_ignore
SET explain_memory_verbosity=detail;
SET
-- end_ignore
-- @author ramans2
-- @created 2014-03-14 12:00:00
-- @modified 2014-03-14 12:00:00
-- @gpdiff True
-- @description Check segment logs for ERROR/PANIC messages
-- SQL to check segment logs for ERROR or PANIC messages
-- Returns the most recent internal error (SQLSTATE XX000) logged on any
-- segment since the master logged the 'select 20785 as explain_test;' marker
-- statement; an empty result set means no ERROR/PANIC occurred during the run.
select logseverity, logstate, logmessage from gp_toolkit.__gp_log_segment_ext where logstate = 'XX000' and logtime >= (select logtime from gp_toolkit.__gp_log_master_ext where logmessage like 'statement: select 20785 as explain_test;'order by logtime desc limit 1) order by logtime desc limit 1;
logseverity | logstate | logmessage
-------------+----------+------------
(0 rows)
-- @author balasr3
-- @description query01
-- @created 2012-07-26 22:04:56
-- @modified 2012-07-26 22:04:56
-- TPC-H Q1: pricing summary report over lineitem, restricted to rows shipped
-- on or before 1998-12-01 minus 114 days, grouped and ordered by return flag
-- and line status. Same query as before, reformatted with uppercase keywords.
EXPLAIN ANALYZE
SELECT
    l_returnflag,
    l_linestatus,
    sum(l_quantity) AS sum_qty,
    sum(l_extendedprice) AS sum_base_price,
    sum(l_extendedprice * (1 - l_discount)) AS sum_disc_price,
    sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) AS sum_charge,
    avg(l_quantity) AS avg_qty,
    avg(l_extendedprice) AS avg_price,
    avg(l_discount) AS avg_disc,
    count(*) AS count_order
FROM
    lineitem
WHERE
    l_shipdate <= date '1998-12-01' - interval '114 day'
GROUP BY
    l_returnflag,
    l_linestatus
ORDER BY
    l_returnflag,
    l_linestatus;
-- @author balasr3
-- @description query02
-- @created 2012-07-26 22:04:56
-- @modified 2012-07-26 22:04:56
-- TPC-H Q2: minimum-cost supplier for size-42 '%NICKEL' parts in MIDDLE EAST.
-- Rewritten from implicit comma joins to explicit ANSI INNER JOINs: the result
-- is identical, but a dropped join predicate now fails visibly instead of
-- silently degenerating into a cross join.
explain analyze select
                s_acctbal,
                s_name,
                n_name,
                p_partkey,
                p_mfgr,
                s_address,
                s_phone,
                s_comment
from
        part
        inner join partsupp on p_partkey = ps_partkey
        inner join supplier on s_suppkey = ps_suppkey
        inner join nation on s_nationkey = n_nationkey
        inner join region on n_regionkey = r_regionkey
where
        p_size = 42
        and p_type like '%NICKEL'
        and r_name = 'MIDDLE EAST'
        -- correlated subquery: cheapest supply cost for this part in the region
        and ps_supplycost = (
                select
                        min(ps_supplycost)
                from
                        partsupp
                        inner join supplier on s_suppkey = ps_suppkey
                        inner join nation on s_nationkey = n_nationkey
                        inner join region on n_regionkey = r_regionkey
                where
                        p_partkey = ps_partkey
                        and r_name = 'MIDDLE EAST'
        )
order by
        s_acctbal desc,
        n_name,
        s_name,
        p_partkey
LIMIT 100;
-- @author balasr3
-- @description query03
-- @created 2012-07-26 22:04:56
-- @modified 2012-07-26 22:04:56
-- TPC-H Q3: shipping-priority revenue for BUILDING-segment customers with
-- orders placed before, and items shipped after, 1995-03-26.
-- Rewritten from implicit comma joins to explicit ANSI INNER JOINs; the query
-- is semantically unchanged, but join conditions can no longer be dropped
-- silently into a cross join.
explain analyze select
                l_orderkey,
                sum(l_extendedprice * (1 - l_discount)) as revenue,
                o_orderdate,
                o_shippriority
from
        customer
        inner join orders on c_custkey = o_custkey
        inner join lineitem on l_orderkey = o_orderkey
where
        c_mktsegment = 'BUILDING'
        and o_orderdate < date '1995-03-26'
        and l_shipdate > date '1995-03-26'
group by
        l_orderkey,
        o_orderdate,
        o_shippriority
order by
        revenue desc,
        o_orderdate
LIMIT 10;
-- @author balasr3
-- @description query04
-- @created 2012-07-26 22:04:56
-- @modified 2012-07-26 22:04:56
-- TPC-H Q4: order-priority checking over the three-month window starting
-- 1996-03-01; counts orders having at least one late-committed line item.
-- Same query as before, reformatted with uppercase keywords.
EXPLAIN ANALYZE
SELECT
    o_orderpriority,
    count(*) AS order_count
FROM
    orders
WHERE
    o_orderdate >= date '1996-03-01'
    AND o_orderdate < date '1996-03-01' + interval '3 month'
    AND EXISTS (
        SELECT
            *
        FROM
            lineitem
        WHERE
            l_orderkey = o_orderkey
            AND l_commitdate < l_receiptdate
    )
GROUP BY
    o_orderpriority
ORDER BY
    o_orderpriority;
-- @author balasr3
-- @description query10
-- @created 2012-07-26 22:04:56
-- @modified 2012-07-26 22:04:56
-- @tags orca
-- TPC-H Q10: returned-item reporting — revenue lost to 'R' returns per
-- customer for orders in the quarter starting 1993-07-01.
-- Rewritten from implicit comma joins to explicit ANSI INNER JOINs; semantics
-- are identical, but the join conditions are now structurally attached to
-- their tables instead of hiding in the WHERE clause.
explain analyze select
                c_custkey,
                c_name,
                sum(l_extendedprice * (1 - l_discount)) as revenue,
                c_acctbal,
                n_name,
                c_address,
                c_phone,
                c_comment
from
        customer
        inner join orders on c_custkey = o_custkey
        inner join lineitem on l_orderkey = o_orderkey
        inner join nation on c_nationkey = n_nationkey
where
        o_orderdate >= date '1993-07-01'
        and o_orderdate < date '1993-07-01' + interval '3 month'
        and l_returnflag = 'R'
group by
        c_custkey,
        c_name,
        c_acctbal,
        c_phone,
        n_name,
        c_address,
        c_comment
order by
        revenue desc
LIMIT 20;
-- @author ramans2
-- @created 2014-03-14 12:00:00
-- @modified 2014-03-14 12:00:00
-- @gpdiff True
-- @description Check segment logs for ERROR/PANIC messages
-- @product_version gpdb:[4.3.0.0-MAIN]
-- SQL to check segment logs for ERROR or PANIC messages
-- Returns the most recent internal error (SQLSTATE XX000) logged on any
-- segment since the master logged the 'select 20785 as explain_test;' marker
-- statement; an empty result set means no ERROR/PANIC occurred while running.
select logseverity, logstate, logmessage from gp_toolkit.__gp_log_segment_ext where logstate = 'XX000' and logtime >= (select logtime from gp_toolkit.__gp_log_master_ext where logmessage like 'statement: select 20785 as explain_test;'order by logtime desc limit 1) order by logtime desc limit 1;
import re

from mpp.models import SQLTestCase
class ExplainAnalyzeTestCase(SQLTestCase):
    """
    Run the sql/ queries under explain_memory_verbosity=summary and verify the
    EXPLAIN ANALYZE output directly instead of diffing against an answer file.

    @product_version gpdb:[4.3.0.0-MAIN], hawq: [1.2.1.0-]
    @db_name memory_accounting
    @gpdiff False
    @gucs explain_memory_verbosity=summary
    """
    sql_dir = 'sql/'
    out_dir = 'output/'

    def verify_out_file(self, out_file, ans_file):
        """
        Override SQLTestCase.verify_out_file: scan the EXPLAIN ANALYZE output
        and check that every plan operator reports memory usage and that every
        reported usage is a positive number of kilobytes.

        out_file -- path of the captured EXPLAIN ANALYZE output
        ans_file -- unused; kept for interface compatibility with the base class
        """
        # Matches "...K bytes" figures; group 1 is the (possibly signed)
        # integer part, so no fragile rstrip('K bytes') is needed.
        memory_re = re.compile(r'(-?\d+)K bytes')
        operators = 0   # plan nodes, counted via their "->" prefix
        usage = 0       # "Memory: " report lines
        # 'with' closes the file even when an assertion below fails
        # (the original open()/close() pair leaked the handle on failure).
        with open(out_file) as f:
            for line in f:
                if "->" in line:
                    operators += 1
                if "Memory: " in line:
                    usage += 1
                # Verify that Peak memory in slice statistics <= Vmem reserved
                peak_obj = re.search(r'(.*) Peak memory: (.*?)K bytes .*', line, re.M | re.I)
                vmem_obj = re.search(r'(.*) Vmem reserved: (.*?)K bytes .*', line, re.M | re.I)
                if peak_obj and vmem_obj:
                    peak = peak_obj.group(2)
                    vmem = vmem_obj.group(2)
                    # Disabling this check. Please see MPP-23072 for details
                    #self.failUnless( int(peak) <= int(vmem), 'Peak memory is greater than Vmem reserved! Peak memory: ' + peak + '; Vmem reserved: ' + vmem)
                # Verify that memory usage is always positive
                for kbytes in memory_re.findall(line):
                    self.failUnless(int(kbytes) > 0, 'Memory usage is not a positive number')
        # Verify that all operators in plan report memory usage; the top node
        # carries no "->" prefix, hence operators + 1 expected "Memory:" lines.
        self.failUnless(usage == operators + 1, 'Memory usage is not reported by all the operators')
class ExplainAnalyzeTestCase_Detail(SQLTestCase):
    """
    Minimal coverage for explain_memory_verbosity=detail: runs the sql_detail/
    queries with the GUC applied; results are presumably compared against the
    expected/ answer files by the SQLTestCase base class (no verification is
    overridden here, unlike ExplainAnalyzeTestCase) -- confirm against
    mpp.models.SQLTestCase.

    @db_name memory_accounting
    @gucs explain_memory_verbosity=detail
    """
    sql_dir = 'sql_detail/'   # input query files
    ans_dir = 'expected/'     # answer files
    out_dir = 'output/'       # captured query output
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册