Commit 6a33d211 authored by Heikki Linnakangas, committed by Jimmy Yih

Remove redundant tests.

We already have the exact same tests in the normal regression suite, as
src/test/regress/sql/DML_over_joins.sql.
Parent fcbfd412
--
-- @created 2009-01-27 14:00:00
-- @modified 2013-06-24 17:00:00
-- @tags ddl schema_topology
-- @description Joins
-- start_ignore
DROP TABLE IF EXISTS update_test;
psql:/path/sql_file:1: NOTICE: table "update_test" does not exist, skipping
DROP TABLE
DROP TABLE IF EXISTS t1;
psql:/path/sql_file:1: NOTICE: table "t1" does not exist, skipping
DROP TABLE
DROP TABLE IF EXISTS t2;
psql:/path/sql_file:1: NOTICE: table "t2" does not exist, skipping
DROP TABLE
-- end_ignore
CREATE TABLE update_test (
e INT DEFAULT 1,
a INT DEFAULT 10,
b INT,
c TEXT
);
psql:/path/sql_file:1: NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'e' as the Greenplum Database data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
CREATE TABLE
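-- Illustration only (hypothetical table name, expected output omitted): the NOTICE above
-- is issued because no DISTRIBUTED BY clause was given; it can be avoided by naming the
-- distribution key explicitly, for example:
CREATE TABLE update_test_explicit (
e INT DEFAULT 1,
a INT DEFAULT 10,
b INT,
c TEXT
) DISTRIBUTED BY (e);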
INSERT INTO update_test(a,b,c) VALUES (5, 10, 'foo');
INSERT 0 1
INSERT INTO update_test(b,a) VALUES (15, 10);
INSERT 0 1
SELECT a,b,c FROM update_test ORDER BY a,b;
a | b | c
----+----+-----
5 | 10 | foo
10 | 15 |
(2 rows)
UPDATE update_test SET a = DEFAULT, b = DEFAULT;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
----+---+-----
10 | | foo
10 | |
(2 rows)
-- aliases for the UPDATE target table
UPDATE update_test AS t SET b = 10 WHERE t.a = 10;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
----+----+-----
10 | 10 | foo
10 | 10 |
(2 rows)
UPDATE update_test t SET b = t.b + 10 WHERE t.a = 10;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
----+----+-----
10 | 20 | foo
10 | 20 |
(2 rows)
UPDATE update_test SET a=v.i FROM (VALUES(100, 20)) AS v(i, j)
WHERE update_test.b = v.j;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
-----+----+-----
100 | 20 | foo
100 | 20 |
(2 rows)
-- ----------------------------------------------
-- Create 2 tables with the same columns, but distributed differently.
CREATE TABLE t1 (id INTEGER, data1 INTEGER, data2 INTEGER) DISTRIBUTED BY (id);
CREATE TABLE
CREATE TABLE t2 (id INTEGER, data1 INTEGER, data2 INTEGER) DISTRIBUTED BY (data1);
CREATE TABLE
INSERT INTO t1 (id, data1, data2) VALUES (1, 1, 1);
INSERT 0 1
INSERT INTO t1 (id, data1, data2) VALUES (2, 2, 2);
INSERT 0 1
INSERT INTO t1 (id, data1, data2) VALUES (3, 3, 3);
INSERT 0 1
INSERT INTO t1 (id, data1, data2) VALUES (4, 4, 4);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (1, 2, 101);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (2, 1, 102);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (3, 4, 103);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (4, 3, 104);
INSERT 0 1
-- Now let's try an update that would require us to move info across segments
-- (depending upon exactly where the data is stored, which will vary depending
-- upon the number of segments; in my case, I used only 2 segments).
UPDATE t1 SET data2 = t2.data2 FROM t2 WHERE t1.data1 = t2.data1;
UPDATE 4
SELECT * from t1;
id | data1 | data2
----+-------+-------
1 | 1 | 102
3 | 3 | 104
2 | 2 | 101
4 | 4 | 103
(4 rows)
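-- Illustration only (expected output omitted, since it depends on the number of segments):
-- row placement can be inspected with Greenplum's gp_segment_id system column, and the
-- cross-segment data movement typically appears as a Redistribute or Broadcast Motion
-- node in the plan for this UPDATE.
SELECT gp_segment_id, id, data1, data2 FROM t1 ORDER BY id;
SELECT gp_segment_id, id, data1, data2 FROM t2 ORDER BY id;
EXPLAIN UPDATE t1 SET data2 = t2.data2 FROM t2 WHERE t1.data1 = t2.data1;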
DROP TABLE IF EXISTS update_test;
DROP TABLE
DROP TABLE IF EXISTS t1;
DROP TABLE
DROP TABLE IF EXISTS t2;
DROP TABLE
--
-- @created 2009-01-27 14:00:00
-- @modified 2013-06-24 17:00:00
-- @tags ddl schema_topology
-- @description Joins
-- start_ignore
DROP TABLE IF EXISTS update_test;
psql:/path/sql_file:1: NOTICE: table "update_test" does not exist, skipping
DROP TABLE
DROP TABLE IF EXISTS t1;
psql:/path/sql_file:1: NOTICE: table "t1" does not exist, skipping
DROP TABLE
DROP TABLE IF EXISTS t2;
psql:/path/sql_file:1: NOTICE: table "t2" does not exist, skipping
DROP TABLE
-- end_ignore
CREATE TABLE update_test (
e INT DEFAULT 1,
a INT DEFAULT 10,
b INT,
c TEXT
);
psql:/path/sql_file:1: NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'e' as the Greenplum Database data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
CREATE TABLE
INSERT INTO update_test(a,b,c) VALUES (5, 10, 'foo');
INSERT 0 1
INSERT INTO update_test(b,a) VALUES (15, 10);
INSERT 0 1
SELECT a,b,c FROM update_test ORDER BY a,b;
a | b | c
----+----+-----
5 | 10 | foo
10 | 15 |
(2 rows)
UPDATE update_test SET a = DEFAULT, b = DEFAULT;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
----+---+-----
10 | | foo
10 | |
(2 rows)
-- aliases for the UPDATE target table
UPDATE update_test AS t SET b = 10 WHERE t.a = 10;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
----+----+-----
10 | 10 | foo
10 | 10 |
(2 rows)
UPDATE update_test t SET b = t.b + 10 WHERE t.a = 10;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
----+----+-----
10 | 20 | foo
10 | 20 |
(2 rows)
UPDATE update_test SET a=v.i FROM (VALUES(100, 20)) AS v(i, j)
WHERE update_test.b = v.j;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
-----+----+-----
100 | 20 | foo
100 | 20 |
(2 rows)
-- ----------------------------------------------
-- Create 2 tables with the same columns, but distributed differently.
CREATE TABLE t1 (id INTEGER, data1 INTEGER, data2 INTEGER) DISTRIBUTED BY (id);
CREATE TABLE
CREATE TABLE t2 (id INTEGER, data1 INTEGER, data2 INTEGER) DISTRIBUTED BY (data1);
CREATE TABLE
INSERT INTO t1 (id, data1, data2) VALUES (1, 1, 1);
INSERT 0 1
INSERT INTO t1 (id, data1, data2) VALUES (2, 2, 2);
INSERT 0 1
INSERT INTO t1 (id, data1, data2) VALUES (3, 3, 3);
INSERT 0 1
INSERT INTO t1 (id, data1, data2) VALUES (4, 4, 4);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (1, 2, 101);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (2, 1, 102);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (3, 4, 103);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (4, 3, 104);
INSERT 0 1
-- Now let's try an update that would require us to move info across segments
-- (depending upon exactly where the data is stored, which will vary depending
-- upon the number of segments; in my case, I used only 2 segments).
UPDATE t1 SET data2 = t2.data2 FROM t2 WHERE t1.data1 = t2.data1;
UPDATE 4
SELECT * from t1;
id | data1 | data2
----+-------+-------
2 | 2 | 101
1 | 1 | 102
4 | 4 | 103
3 | 3 | 104
(4 rows)
DROP TABLE IF EXISTS update_test;
DROP TABLE
DROP TABLE IF EXISTS t1;
DROP TABLE
DROP TABLE IF EXISTS t2;
DROP TABLE
--
-- @created 2009-01-27 14:00:00
-- @modified 2013-06-24 17:00:00
-- @tags ddl schema_topology
-- @description Joins
-- start_ignore
DROP TABLE IF EXISTS update_test;
psql:/path/sql_file:1: NOTICE: table "update_test" does not exist, skipping
DROP TABLE
DROP TABLE IF EXISTS t1;
psql:/path/sql_file:1: NOTICE: table "t1" does not exist, skipping
DROP TABLE
DROP TABLE IF EXISTS t2;
psql:/path/sql_file:1: NOTICE: table "t2" does not exist, skipping
DROP TABLE
-- end_ignore
CREATE TABLE update_test (
e INT DEFAULT 1,
a INT DEFAULT 10,
b INT,
c TEXT
);
psql:/path/sql_file:1: NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'e' as the Greenplum Database data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
CREATE TABLE
INSERT INTO update_test(a,b,c) VALUES (5, 10, 'foo');
INSERT 0 1
INSERT INTO update_test(b,a) VALUES (15, 10);
INSERT 0 1
SELECT a,b,c FROM update_test ORDER BY a,b;
a | b | c
----+----+-----
5 | 10 | foo
10 | 15 |
(2 rows)
UPDATE update_test SET a = DEFAULT, b = DEFAULT;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
----+---+-----
10 | | foo
10 | |
(2 rows)
-- aliases for the UPDATE target table
UPDATE update_test AS t SET b = 10 WHERE t.a = 10;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
----+----+-----
10 | 10 | foo
10 | 10 |
(2 rows)
UPDATE update_test t SET b = t.b + 10 WHERE t.a = 10;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
----+----+-----
10 | 20 | foo
10 | 20 |
(2 rows)
UPDATE update_test SET a=v.i FROM (VALUES(100, 20)) AS v(i, j)
WHERE update_test.b = v.j;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
-----+----+-----
100 | 20 | foo
100 | 20 |
(2 rows)
-- ----------------------------------------------
-- Create 2 tables with the same columns, but distributed differently.
CREATE TABLE t1 (id INTEGER, data1 INTEGER, data2 INTEGER) DISTRIBUTED BY (id);
CREATE TABLE
CREATE TABLE t2 (id INTEGER, data1 INTEGER, data2 INTEGER) DISTRIBUTED BY (data1);
CREATE TABLE
INSERT INTO t1 (id, data1, data2) VALUES (1, 1, 1);
INSERT 0 1
INSERT INTO t1 (id, data1, data2) VALUES (2, 2, 2);
INSERT 0 1
INSERT INTO t1 (id, data1, data2) VALUES (3, 3, 3);
INSERT 0 1
INSERT INTO t1 (id, data1, data2) VALUES (4, 4, 4);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (1, 2, 101);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (2, 1, 102);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (3, 4, 103);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (4, 3, 104);
INSERT 0 1
-- Now let's try an update that would require us to move info across segments
-- (depending upon exactly where the data is stored, which will vary depending
-- upon the number of segments; in my case, I used only 2 segments).
UPDATE t1 SET data2 = t2.data2 FROM t2 WHERE t1.data1 = t2.data1;
UPDATE 4
SELECT * from t1;
id | data1 | data2
----+-------+-------
2 | 2 | 101
3 | 3 | 104
1 | 1 | 102
4 | 4 | 103
(4 rows)
DROP TABLE IF EXISTS update_test;
DROP TABLE
DROP TABLE IF EXISTS t1;
DROP TABLE
DROP TABLE IF EXISTS t2;
DROP TABLE
--
-- @created 2009-01-27 14:00:00
-- @modified 2013-06-24 17:00:00
-- @tags ddl schema_topology
-- @description Joins
-- start_ignore
DROP TABLE IF EXISTS update_test;
psql:/path/sql_file:1: NOTICE: table "update_test" does not exist, skipping
DROP TABLE
DROP TABLE IF EXISTS t1;
psql:/path/sql_file:1: NOTICE: table "t1" does not exist, skipping
DROP TABLE
DROP TABLE IF EXISTS t2;
psql:/path/sql_file:1: NOTICE: table "t2" does not exist, skipping
DROP TABLE
-- end_ignore
CREATE TABLE update_test (
e INT DEFAULT 1,
a INT DEFAULT 10,
b INT,
c TEXT
);
psql:/path/sql_file:1: NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'e' as the Greenplum Database data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
CREATE TABLE
INSERT INTO update_test(a,b,c) VALUES (5, 10, 'foo');
INSERT 0 1
INSERT INTO update_test(b,a) VALUES (15, 10);
INSERT 0 1
SELECT a,b,c FROM update_test ORDER BY a,b;
a | b | c
----+----+-----
5 | 10 | foo
10 | 15 |
(2 rows)
UPDATE update_test SET a = DEFAULT, b = DEFAULT;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
----+---+-----
10 | | foo
10 | |
(2 rows)
-- aliases for the UPDATE target table
UPDATE update_test AS t SET b = 10 WHERE t.a = 10;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
----+----+-----
10 | 10 | foo
10 | 10 |
(2 rows)
UPDATE update_test t SET b = t.b + 10 WHERE t.a = 10;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
----+----+-----
10 | 20 | foo
10 | 20 |
(2 rows)
UPDATE update_test SET a=v.i FROM (VALUES(100, 20)) AS v(i, j)
WHERE update_test.b = v.j;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
-----+----+-----
100 | 20 | foo
100 | 20 |
(2 rows)
-- ----------------------------------------------
-- Create 2 tables with the same columns, but distributed differently.
CREATE TABLE t1 (id INTEGER, data1 INTEGER, data2 INTEGER) DISTRIBUTED BY (id);
CREATE TABLE
CREATE TABLE t2 (id INTEGER, data1 INTEGER, data2 INTEGER) DISTRIBUTED BY (data1);
CREATE TABLE
INSERT INTO t1 (id, data1, data2) VALUES (1, 1, 1);
INSERT 0 1
INSERT INTO t1 (id, data1, data2) VALUES (2, 2, 2);
INSERT 0 1
INSERT INTO t1 (id, data1, data2) VALUES (3, 3, 3);
INSERT 0 1
INSERT INTO t1 (id, data1, data2) VALUES (4, 4, 4);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (1, 2, 101);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (2, 1, 102);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (3, 4, 103);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (4, 3, 104);
INSERT 0 1
-- Now let's try an update that would require us to move info across segments
-- (depending upon exactly where the data is stored, which will vary depending
-- upon the number of segments; in my case, I used only 2 segments).
UPDATE t1 SET data2 = t2.data2 FROM t2 WHERE t1.data1 = t2.data1;
UPDATE 4
SELECT * from t1;
id | data1 | data2
----+-------+-------
1 | 1 | 102
3 | 3 | 104
2 | 2 | 101
4 | 4 | 103
(4 rows)
DROP TABLE IF EXISTS update_test;
DROP TABLE
DROP TABLE IF EXISTS t1;
DROP TABLE
DROP TABLE IF EXISTS t2;
DROP TABLE
--
-- @created 2009-01-27 14:00:00
-- @modified 2013-06-24 17:00:00
-- @tags ddl schema_topology
-- @description Joins
-- start_ignore
DROP TABLE IF EXISTS update_test;
psql:/path/sql_file:1: NOTICE: table "update_test" does not exist, skipping
DROP TABLE
DROP TABLE IF EXISTS t1;
DROP TABLE
DROP TABLE IF EXISTS t2;
DROP TABLE
-- end_ignore
CREATE TABLE update_test (
e INT DEFAULT 1,
a INT DEFAULT 10,
b INT,
c TEXT
);
psql:/path/sql_file:1: NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'e' as the Greenplum Database data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
CREATE TABLE
INSERT INTO update_test(a,b,c) VALUES (5, 10, 'foo');
INSERT 0 1
INSERT INTO update_test(b,a) VALUES (15, 10);
INSERT 0 1
SELECT a,b,c FROM update_test ORDER BY a,b;
a | b | c
----+----+-----
5 | 10 | foo
10 | 15 |
(2 rows)
UPDATE update_test SET a = DEFAULT, b = DEFAULT;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
----+---+-----
10 | | foo
10 | |
(2 rows)
-- aliases for the UPDATE target table
UPDATE update_test AS t SET b = 10 WHERE t.a = 10;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
----+----+-----
10 | 10 | foo
10 | 10 |
(2 rows)
UPDATE update_test t SET b = t.b + 10 WHERE t.a = 10;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
----+----+-----
10 | 20 | foo
10 | 20 |
(2 rows)
UPDATE update_test SET a=v.i FROM (VALUES(100, 20)) AS v(i, j)
WHERE update_test.b = v.j;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
-----+----+-----
100 | 20 | foo
100 | 20 |
(2 rows)
-- ----------------------------------------------
-- Create 2 tables with the same columns, but distributed differently.
CREATE TABLE t1 (id INTEGER, data1 INTEGER, data2 INTEGER) DISTRIBUTED BY (id);
CREATE TABLE
CREATE TABLE t2 (id INTEGER, data1 INTEGER, data2 INTEGER) DISTRIBUTED BY (data1);
CREATE TABLE
INSERT INTO t1 (id, data1, data2) VALUES (1, 1, 1);
INSERT 0 1
INSERT INTO t1 (id, data1, data2) VALUES (2, 2, 2);
INSERT 0 1
INSERT INTO t1 (id, data1, data2) VALUES (3, 3, 3);
INSERT 0 1
INSERT INTO t1 (id, data1, data2) VALUES (4, 4, 4);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (1, 2, 101);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (2, 1, 102);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (3, 4, 103);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (4, 3, 104);
INSERT 0 1
-- Now let's try an update that would require us to move info across segments
-- (depending upon exactly where the data is stored, which will vary depending
-- upon the number of segments; in my case, I used only 2 segments).
UPDATE t1 SET data2 = t2.data2 FROM t2 WHERE t1.data1 = t2.data1;
UPDATE 4
SELECT * from t1;
id | data1 | data2
----+-------+-------
4 | 4 | 103
1 | 1 | 102
2 | 2 | 101
3 | 3 | 104
(4 rows)
DROP TABLE IF EXISTS update_test;
DROP TABLE
DROP TABLE IF EXISTS t1;
DROP TABLE
DROP TABLE IF EXISTS t2;
DROP TABLE
--
-- @created 2009-01-27 14:00:00
-- @modified 2013-06-24 17:00:00
-- @tags ddl schema_topology
-- @description Joins
-- start_ignore
DROP TABLE IF EXISTS update_test;
psql:/path/sql_file:1: NOTICE: table "update_test" does not exist, skipping
DROP TABLE
DROP TABLE IF EXISTS t1;
psql:/path/sql_file:1: NOTICE: table "t1" does not exist, skipping
DROP TABLE
DROP TABLE IF EXISTS t2;
psql:/path/sql_file:1: NOTICE: table "t2" does not exist, skipping
DROP TABLE
-- end_ignore
CREATE TABLE update_test (
e INT DEFAULT 1,
a INT DEFAULT 10,
b INT,
c TEXT
);
psql:/path/sql_file:1: NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'e' as the Greenplum Database data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
CREATE TABLE
INSERT INTO update_test(a,b,c) VALUES (5, 10, 'foo');
INSERT 0 1
INSERT INTO update_test(b,a) VALUES (15, 10);
INSERT 0 1
SELECT a,b,c FROM update_test ORDER BY a,b;
a | b | c
----+----+-----
5 | 10 | foo
10 | 15 |
(2 rows)
UPDATE update_test SET a = DEFAULT, b = DEFAULT;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
----+---+-----
10 | | foo
10 | |
(2 rows)
-- aliases for the UPDATE target table
UPDATE update_test AS t SET b = 10 WHERE t.a = 10;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
----+----+-----
10 | 10 | foo
10 | 10 |
(2 rows)
UPDATE update_test t SET b = t.b + 10 WHERE t.a = 10;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
----+----+-----
10 | 20 | foo
10 | 20 |
(2 rows)
UPDATE update_test SET a=v.i FROM (VALUES(100, 20)) AS v(i, j)
WHERE update_test.b = v.j;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
-----+----+-----
100 | 20 | foo
100 | 20 |
(2 rows)
-- ----------------------------------------------
-- Create 2 tables with the same columns, but distributed differently.
CREATE TABLE t1 (id INTEGER, data1 INTEGER, data2 INTEGER) DISTRIBUTED BY (id);
CREATE TABLE
CREATE TABLE t2 (id INTEGER, data1 INTEGER, data2 INTEGER) DISTRIBUTED BY (data1);
CREATE TABLE
INSERT INTO t1 (id, data1, data2) VALUES (1, 1, 1);
INSERT 0 1
INSERT INTO t1 (id, data1, data2) VALUES (2, 2, 2);
INSERT 0 1
INSERT INTO t1 (id, data1, data2) VALUES (3, 3, 3);
INSERT 0 1
INSERT INTO t1 (id, data1, data2) VALUES (4, 4, 4);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (1, 2, 101);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (2, 1, 102);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (3, 4, 103);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (4, 3, 104);
INSERT 0 1
-- Now let's try an update that would require us to move info across segments
-- (depending upon exactly where the data is stored, which will vary depending
-- upon the number of segments; in my case, I used only 2 segments).
UPDATE t1 SET data2 = t2.data2 FROM t2 WHERE t1.data1 = t2.data1;
UPDATE 4
SELECT * from t1;
id | data1 | data2
----+-------+-------
1 | 1 | 102
4 | 4 | 103
2 | 2 | 101
3 | 3 | 104
(4 rows)
DROP TABLE IF EXISTS update_test;
DROP TABLE
DROP TABLE IF EXISTS t1;
DROP TABLE
DROP TABLE IF EXISTS t2;
DROP TABLE
-- start_ignore
SET optimizer=on;
SET
SET optimizer_log=on;
SET
-- end_ignore
--
-- @created 2009-01-27 14:00:00
-- @modified 2013-06-24 17:00:00
-- @tags ddl schema_topology
-- @description Joins
-- start_ignore
DROP TABLE IF EXISTS update_test;
psql:/path/sql_file:1: NOTICE: table "update_test" does not exist, skipping
DROP TABLE
DROP TABLE IF EXISTS t1;
psql:/path/sql_file:1: NOTICE: table "t1" does not exist, skipping
DROP TABLE
DROP TABLE IF EXISTS t2;
psql:/path/sql_file:1: NOTICE: table "t2" does not exist, skipping
DROP TABLE
-- end_ignore
CREATE TABLE update_test (
e INT DEFAULT 1,
a INT DEFAULT 10,
b INT,
c TEXT
);
psql:/path/sql_file:1: NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'e' as the Greenplum Database data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
CREATE TABLE
INSERT INTO update_test(a,b,c) VALUES (5, 10, 'foo');
INSERT 0 1
INSERT INTO update_test(b,a) VALUES (15, 10);
INSERT 0 1
SELECT a,b,c FROM update_test ORDER BY a,b;
a | b | c
----+----+-----
5 | 10 | foo
10 | 15 |
(2 rows)
UPDATE update_test SET a = DEFAULT, b = DEFAULT;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,b;
a | b | c
----+---+-----
10 | | foo
10 | |
(2 rows)
-- aliases for the UPDATE target table
UPDATE update_test AS t SET b = 10 WHERE t.a = 10;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
----+----+-----
10 | 10 | foo
10 | 10 |
(2 rows)
UPDATE update_test t SET b = t.b + 10 WHERE t.a = 10;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
----+----+-----
10 | 20 | foo
10 | 20 |
(2 rows)
UPDATE update_test SET a=v.i FROM (VALUES(100, 20)) AS v(i, j)
WHERE update_test.b = v.j;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
-----+----+-----
100 | 20 | foo
100 | 20 |
(2 rows)
-- ----------------------------------------------
-- Create 2 tables with the same columns, but distributed differently.
CREATE TABLE t1 (id INTEGER, data1 INTEGER, data2 INTEGER) DISTRIBUTED BY (id);
CREATE TABLE
CREATE TABLE t2 (id INTEGER, data1 INTEGER, data2 INTEGER) DISTRIBUTED BY (data1);
CREATE TABLE
INSERT INTO t1 (id, data1, data2) VALUES (1, 1, 1);
INSERT 0 1
INSERT INTO t1 (id, data1, data2) VALUES (2, 2, 2);
INSERT 0 1
INSERT INTO t1 (id, data1, data2) VALUES (3, 3, 3);
INSERT 0 1
INSERT INTO t1 (id, data1, data2) VALUES (4, 4, 4);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (1, 2, 101);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (2, 1, 102);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (3, 4, 103);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (4, 3, 104);
INSERT 0 1
-- Now let's try an update that would require us to move info across segments
-- (depending upon exactly where the data is stored, which will vary depending
-- upon the number of segments; in my case, I used only 2 segments).
UPDATE t1 SET data2 = t2.data2 FROM t2 WHERE t1.data1 = t2.data1;
UPDATE 4
SELECT * from t1;
id | data1 | data2
----+-------+-------
2 | 2 | 101
4 | 4 | 103
1 | 1 | 102
3 | 3 | 104
(4 rows)
DROP TABLE IF EXISTS update_test;
DROP TABLE
DROP TABLE IF EXISTS t1;
DROP TABLE
DROP TABLE IF EXISTS t2;
DROP TABLE
-- start_ignore
-- end_ignore
-- start_ignore
-- end_ignore
--
-- @created 2009-01-27 14:00:00
-- @modified 2013-06-24 17:00:00
-- @tags ddl schema_topology
-- @description Joins
-- start_ignore
DROP TABLE IF EXISTS update_test;
psql:/path/sql_file:1: NOTICE: table "update_test" does not exist, skipping
DROP TABLE
DROP TABLE IF EXISTS t1;
psql:/path/sql_file:1: NOTICE: table "t1" does not exist, skipping
DROP TABLE
DROP TABLE IF EXISTS t2;
psql:/path/sql_file:1: NOTICE: table "t2" does not exist, skipping
DROP TABLE
-- end_ignore
CREATE TABLE update_test (
e INT DEFAULT 1,
a INT DEFAULT 10,
b INT,
c TEXT
);
psql:/path/sql_file:1: NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'e' as the Greenplum Database data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
CREATE TABLE
INSERT INTO update_test(a,b,c) VALUES (5, 10, 'foo');
INSERT 0 1
INSERT INTO update_test(b,a) VALUES (15, 10);
INSERT 0 1
SELECT a,b,c FROM update_test ORDER BY a,b;
a | b | c
----+----+-----
5 | 10 | foo
10 | 15 |
(2 rows)
UPDATE update_test SET a = DEFAULT, b = DEFAULT;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
----+---+-----
10 | | foo
10 | |
(2 rows)
-- aliases for the UPDATE target table
UPDATE update_test AS t SET b = 10 WHERE t.a = 10;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
----+----+-----
10 | 10 | foo
10 | 10 |
(2 rows)
UPDATE update_test t SET b = t.b + 10 WHERE t.a = 10;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
----+----+-----
10 | 20 | foo
10 | 20 |
(2 rows)
UPDATE update_test SET a=v.i FROM (VALUES(100, 20)) AS v(i, j)
WHERE update_test.b = v.j;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
-----+----+-----
100 | 20 | foo
100 | 20 |
(2 rows)
-- ----------------------------------------------
-- Create 2 tables with the same columns, but distributed differently.
CREATE TABLE t1 (id INTEGER, data1 INTEGER, data2 INTEGER) DISTRIBUTED BY (id);
CREATE TABLE
CREATE TABLE t2 (id INTEGER, data1 INTEGER, data2 INTEGER) DISTRIBUTED BY (data1);
CREATE TABLE
INSERT INTO t1 (id, data1, data2) VALUES (1, 1, 1);
INSERT 0 1
INSERT INTO t1 (id, data1, data2) VALUES (2, 2, 2);
INSERT 0 1
INSERT INTO t1 (id, data1, data2) VALUES (3, 3, 3);
INSERT 0 1
INSERT INTO t1 (id, data1, data2) VALUES (4, 4, 4);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (1, 2, 101);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (2, 1, 102);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (3, 4, 103);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (4, 3, 104);
INSERT 0 1
-- Now let's try an update that would require us to move info across segments
-- (depending upon exactly where the data is stored, which will vary depending
-- upon the number of segments; in my case, I used only 2 segments).
UPDATE t1 SET data2 = t2.data2 FROM t2 WHERE t1.data1 = t2.data1;
UPDATE 4
SELECT * from t1;
id | data1 | data2
----+-------+-------
1 | 1 | 102
2 | 2 | 101
3 | 3 | 104
4 | 4 | 103
(4 rows)
DROP TABLE IF EXISTS update_test;
DROP TABLE
DROP TABLE IF EXISTS t1;
DROP TABLE
DROP TABLE IF EXISTS t2;
DROP TABLE
-- start_ignore
-- end_ignore
-- start_ignore
-- end_ignore
--
-- @created 2009-01-27 14:00:00
-- @modified 2013-06-24 17:00:00
-- @tags ddl schema_topology
-- @description Joins
-- start_ignore
DROP TABLE IF EXISTS update_test;
psql:/path/sql_file:1: NOTICE: table "update_test" does not exist, skipping
DROP TABLE
DROP TABLE IF EXISTS t1;
psql:/path/sql_file:1: NOTICE: table "t1" does not exist, skipping
DROP TABLE
DROP TABLE IF EXISTS t2;
psql:/path/sql_file:1: NOTICE: table "t2" does not exist, skipping
DROP TABLE
-- end_ignore
CREATE TABLE update_test (
e INT DEFAULT 1,
a INT DEFAULT 10,
b INT,
c TEXT
);
psql:/path/sql_file:1: NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'e' as the Greenplum Database data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
CREATE TABLE
INSERT INTO update_test(a,b,c) VALUES (5, 10, 'foo');
INSERT 0 1
INSERT INTO update_test(b,a) VALUES (15, 10);
INSERT 0 1
SELECT a,b,c FROM update_test ORDER BY a,b;
a | b | c
----+----+-----
5 | 10 | foo
10 | 15 |
(2 rows)
UPDATE update_test SET a = DEFAULT, b = DEFAULT;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
----+---+-----
10 | | foo
10 | |
(2 rows)
-- aliases for the UPDATE target table
UPDATE update_test AS t SET b = 10 WHERE t.a = 10;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
----+----+-----
10 | 10 | foo
10 | 10 |
(2 rows)
UPDATE update_test t SET b = t.b + 10 WHERE t.a = 10;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
----+----+-----
10 | 20 | foo
10 | 20 |
(2 rows)
UPDATE update_test SET a=v.i FROM (VALUES(100, 20)) AS v(i, j)
WHERE update_test.b = v.j;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
-----+----+-----
100 | 20 | foo
100 | 20 |
(2 rows)
-- ----------------------------------------------
-- Create 2 tables with the same columns, but distributed differently.
CREATE TABLE t1 (id INTEGER, data1 INTEGER, data2 INTEGER) DISTRIBUTED BY (id);
CREATE TABLE
CREATE TABLE t2 (id INTEGER, data1 INTEGER, data2 INTEGER) DISTRIBUTED BY (data1);
CREATE TABLE
INSERT INTO t1 (id, data1, data2) VALUES (1, 1, 1);
INSERT 0 1
INSERT INTO t1 (id, data1, data2) VALUES (2, 2, 2);
INSERT 0 1
INSERT INTO t1 (id, data1, data2) VALUES (3, 3, 3);
INSERT 0 1
INSERT INTO t1 (id, data1, data2) VALUES (4, 4, 4);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (1, 2, 101);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (2, 1, 102);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (3, 4, 103);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (4, 3, 104);
INSERT 0 1
-- Now let's try an update that would require us to move info across segments
-- (depending upon exactly where the data is stored, which will vary depending
-- upon the number of segments; in my case, I used only 2 segments).
UPDATE t1 SET data2 = t2.data2 FROM t2 WHERE t1.data1 = t2.data1;
UPDATE 4
SELECT * from t1;
id | data1 | data2
----+-------+-------
2 | 2 | 101
3 | 3 | 104
4 | 4 | 103
1 | 1 | 102
(4 rows)
DROP TABLE IF EXISTS update_test;
DROP TABLE
DROP TABLE IF EXISTS t1;
DROP TABLE
DROP TABLE IF EXISTS t2;
DROP TABLE
-- start_ignore
-- end_ignore
-- start_ignore
-- end_ignore
--
-- @created 2009-01-27 14:00:00
-- @modified 2013-06-24 17:00:00
-- @tags ddl schema_topology
-- @description Joins
-- start_ignore
DROP TABLE IF EXISTS update_test;
psql:/path/sql_file:1: NOTICE: table "update_test" does not exist, skipping
DROP TABLE
DROP TABLE IF EXISTS t1;
psql:/path/sql_file:1: NOTICE: table "t1" does not exist, skipping
DROP TABLE
DROP TABLE IF EXISTS t2;
psql:/path/sql_file:1: NOTICE: table "t2" does not exist, skipping
DROP TABLE
-- end_ignore
CREATE TABLE update_test (
e INT DEFAULT 1,
a INT DEFAULT 10,
b INT,
c TEXT
);
psql:/path/sql_file:1: NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'e' as the Greenplum Database data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
CREATE TABLE
INSERT INTO update_test(a,b,c) VALUES (5, 10, 'foo');
INSERT 0 1
INSERT INTO update_test(b,a) VALUES (15, 10);
INSERT 0 1
SELECT a,b,c FROM update_test ORDER BY a,b;
a | b | c
----+----+-----
5 | 10 | foo
10 | 15 |
(2 rows)
UPDATE update_test SET a = DEFAULT, b = DEFAULT;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
----+---+-----
10 | | foo
10 | |
(2 rows)
-- aliases for the UPDATE target table
UPDATE update_test AS t SET b = 10 WHERE t.a = 10;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
----+----+-----
10 | 10 | foo
10 | 10 |
(2 rows)
UPDATE update_test t SET b = t.b + 10 WHERE t.a = 10;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
----+----+-----
10 | 20 | foo
10 | 20 |
(2 rows)
UPDATE update_test SET a=v.i FROM (VALUES(100, 20)) AS v(i, j)
WHERE update_test.b = v.j;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
-----+----+-----
100 | 20 | foo
100 | 20 |
(2 rows)
-- ----------------------------------------------
-- Create 2 tables with the same columns, but distributed differently.
CREATE TABLE t1 (id INTEGER, data1 INTEGER, data2 INTEGER) DISTRIBUTED BY (id);
CREATE TABLE
CREATE TABLE t2 (id INTEGER, data1 INTEGER, data2 INTEGER) DISTRIBUTED BY (data1);
CREATE TABLE
INSERT INTO t1 (id, data1, data2) VALUES (1, 1, 1);
INSERT 0 1
INSERT INTO t1 (id, data1, data2) VALUES (2, 2, 2);
INSERT 0 1
INSERT INTO t1 (id, data1, data2) VALUES (3, 3, 3);
INSERT 0 1
INSERT INTO t1 (id, data1, data2) VALUES (4, 4, 4);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (1, 2, 101);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (2, 1, 102);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (3, 4, 103);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (4, 3, 104);
INSERT 0 1
-- Now let's try an update that would require us to move info across segments
-- (depending upon exactly where the data is stored, which will vary depending
-- upon the number of segments; in my case, I used only 2 segments).
UPDATE t1 SET data2 = t2.data2 FROM t2 WHERE t1.data1 = t2.data1;
UPDATE 4
SELECT * from t1;
id | data1 | data2
----+-------+-------
1 | 1 | 102
3 | 3 | 104
2 | 2 | 101
4 | 4 | 103
(4 rows)
DROP TABLE IF EXISTS update_test;
DROP TABLE
DROP TABLE IF EXISTS t1;
DROP TABLE
DROP TABLE IF EXISTS t2;
DROP TABLE
-- start_ignore
-- end_ignore
-- start_ignore
-- end_ignore
--
-- @created 2009-01-27 14:00:00
-- @modified 2013-06-24 17:00:00
-- @tags ddl schema_topology
-- @description Joins
-- start_ignore
DROP TABLE IF EXISTS update_test;
psql:/path/sql_file:1: NOTICE: table "update_test" does not exist, skipping
DROP TABLE
DROP TABLE IF EXISTS t1;
psql:/path/sql_file:1: NOTICE: table "t1" does not exist, skipping
DROP TABLE
DROP TABLE IF EXISTS t2;
psql:/path/sql_file:1: NOTICE: table "t2" does not exist, skipping
DROP TABLE
-- end_ignore
CREATE TABLE update_test (
e INT DEFAULT 1,
a INT DEFAULT 10,
b INT,
c TEXT
);
psql:/path/sql_file:1: NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'e' as the Greenplum Database data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
CREATE TABLE
INSERT INTO update_test(a,b,c) VALUES (5, 10, 'foo');
INSERT 0 1
INSERT INTO update_test(b,a) VALUES (15, 10);
INSERT 0 1
SELECT a,b,c FROM update_test ORDER BY a,b;
a | b | c
----+----+-----
5 | 10 | foo
10 | 15 |
(2 rows)
UPDATE update_test SET a = DEFAULT, b = DEFAULT;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
----+---+-----
10 | | foo
10 | |
(2 rows)
-- aliases for the UPDATE target table
UPDATE update_test AS t SET b = 10 WHERE t.a = 10;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
----+----+-----
10 | 10 | foo
10 | 10 |
(2 rows)
UPDATE update_test t SET b = t.b + 10 WHERE t.a = 10;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
----+----+-----
10 | 20 | foo
10 | 20 |
(2 rows)
UPDATE update_test SET a=v.i FROM (VALUES(100, 20)) AS v(i, j)
WHERE update_test.b = v.j;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
-----+----+-----
100 | 20 | foo
100 | 20 |
(2 rows)
-- ----------------------------------------------
-- Create 2 tables with the same columns, but distributed differently.
CREATE TABLE t1 (id INTEGER, data1 INTEGER, data2 INTEGER) DISTRIBUTED BY (id);
CREATE TABLE
CREATE TABLE t2 (id INTEGER, data1 INTEGER, data2 INTEGER) DISTRIBUTED BY (data1);
CREATE TABLE
INSERT INTO t1 (id, data1, data2) VALUES (1, 1, 1);
INSERT 0 1
INSERT INTO t1 (id, data1, data2) VALUES (2, 2, 2);
INSERT 0 1
INSERT INTO t1 (id, data1, data2) VALUES (3, 3, 3);
INSERT 0 1
INSERT INTO t1 (id, data1, data2) VALUES (4, 4, 4);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (1, 2, 101);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (2, 1, 102);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (3, 4, 103);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (4, 3, 104);
INSERT 0 1
-- Now let's try an update that would require us to move info across segments
-- (depending upon exactly where the data is stored, which will vary depending
-- upon the number of segments; in my case, I used only 2 segments).
UPDATE t1 SET data2 = t2.data2 FROM t2 WHERE t1.data1 = t2.data1;
UPDATE 4
SELECT * from t1;
id | data1 | data2
----+-------+-------
4 | 4 | 103
2 | 2 | 101
3 | 3 | 104
1 | 1 | 102
(4 rows)
DROP TABLE IF EXISTS update_test;
DROP TABLE
DROP TABLE IF EXISTS t1;
DROP TABLE
DROP TABLE IF EXISTS t2;
DROP TABLE
-- start_ignore
-- end_ignore
-- start_ignore
-- end_ignore
--
-- @created 2009-01-27 14:00:00
-- @modified 2013-06-24 17:00:00
-- @tags ddl schema_topology
-- @description Joins
-- start_ignore
DROP TABLE IF EXISTS update_test;
psql:/path/sql_file:1: NOTICE: table "update_test" does not exist, skipping
DROP TABLE
DROP TABLE IF EXISTS t1;
psql:/path/sql_file:1: NOTICE: table "t1" does not exist, skipping
DROP TABLE
DROP TABLE IF EXISTS t2;
psql:/path/sql_file:1: NOTICE: table "t2" does not exist, skipping
DROP TABLE
-- end_ignore
CREATE TABLE update_test (
e INT DEFAULT 1,
a INT DEFAULT 10,
b INT,
c TEXT
);
psql:/path/sql_file:1: NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'e' as the Greenplum Database data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
CREATE TABLE
INSERT INTO update_test(a,b,c) VALUES (5, 10, 'foo');
INSERT 0 1
INSERT INTO update_test(b,a) VALUES (15, 10);
INSERT 0 1
SELECT a,b,c FROM update_test ORDER BY a,b;
a | b | c
----+----+-----
5 | 10 | foo
10 | 15 |
(2 rows)
UPDATE update_test SET a = DEFAULT, b = DEFAULT;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
----+---+-----
10 | | foo
10 | |
(2 rows)
-- aliases for the UPDATE target table
UPDATE update_test AS t SET b = 10 WHERE t.a = 10;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
----+----+-----
10 | 10 | foo
10 | 10 |
(2 rows)
UPDATE update_test t SET b = t.b + 10 WHERE t.a = 10;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
----+----+-----
10 | 20 | foo
10 | 20 |
(2 rows)
UPDATE update_test SET a=v.i FROM (VALUES(100, 20)) AS v(i, j)
WHERE update_test.b = v.j;
UPDATE 2
SELECT a,b,c FROM update_test ORDER BY a,c;
a | b | c
-----+----+-----
100 | 20 | foo
100 | 20 |
(2 rows)
-- ----------------------------------------------
-- Create 2 tables with the same columns, but distributed differently.
CREATE TABLE t1 (id INTEGER, data1 INTEGER, data2 INTEGER) DISTRIBUTED BY (id);
CREATE TABLE
CREATE TABLE t2 (id INTEGER, data1 INTEGER, data2 INTEGER) DISTRIBUTED BY (data1);
CREATE TABLE
INSERT INTO t1 (id, data1, data2) VALUES (1, 1, 1);
INSERT 0 1
INSERT INTO t1 (id, data1, data2) VALUES (2, 2, 2);
INSERT 0 1
INSERT INTO t1 (id, data1, data2) VALUES (3, 3, 3);
INSERT 0 1
INSERT INTO t1 (id, data1, data2) VALUES (4, 4, 4);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (1, 2, 101);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (2, 1, 102);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (3, 4, 103);
INSERT 0 1
INSERT INTO t2 (id, data1, data2) VALUES (4, 3, 104);
INSERT 0 1
-- Now let's try an update that would require us to move info across segments
-- (depending upon exactly where the data is stored, which will vary depending
-- upon the number of segments; in my case, I used only 2 segments).
UPDATE t1 SET data2 = t2.data2 FROM t2 WHERE t1.data1 = t2.data1;
UPDATE 4
SELECT * from t1;
id | data1 | data2
----+-------+-------
3 | 3 | 104
1 | 1 | 102
2 | 2 | 101
4 | 4 | 103
(4 rows)
DROP TABLE IF EXISTS update_test;
DROP TABLE
DROP TABLE IF EXISTS t1;
DROP TABLE
DROP TABLE IF EXISTS t2;
DROP TABLE
-- start_ignore
-- end_ignore
--
-- @created 2009-01-27 14:00:00
-- @modified 2013-06-24 17:00:00
-- @tags ddl schema_topology
-- @description Joins
-- start_ignore
DROP TABLE IF EXISTS update_test;
DROP TABLE IF EXISTS t1;
DROP TABLE IF EXISTS t2;
-- end_ignore
CREATE TABLE update_test (
e INT DEFAULT 1,
a INT DEFAULT 10,
b INT,
c TEXT
);
INSERT INTO update_test(a,b,c) VALUES (5, 10, 'foo');
INSERT INTO update_test(b,a) VALUES (15, 10);
SELECT a,b,c FROM update_test ORDER BY a,b;
UPDATE update_test SET a = DEFAULT, b = DEFAULT;
SELECT a,b,c FROM update_test ORDER BY a,c;
-- aliases for the UPDATE target table
UPDATE update_test AS t SET b = 10 WHERE t.a = 10;
SELECT a,b,c FROM update_test ORDER BY a,c;
UPDATE update_test t SET b = t.b + 10 WHERE t.a = 10;
SELECT a,b,c FROM update_test ORDER BY a,c;
UPDATE update_test SET a=v.i FROM (VALUES(100, 20)) AS v(i, j)
WHERE update_test.b = v.j;
SELECT a,b,c FROM update_test ORDER BY a,c;
-- ----------------------------------------------
-- Create 2 tables with the same columns, but distributed differently.
CREATE TABLE t1 (id INTEGER, data1 INTEGER, data2 INTEGER) DISTRIBUTED BY (id);
CREATE TABLE t2 (id INTEGER, data1 INTEGER, data2 INTEGER) DISTRIBUTED BY (data1);
INSERT INTO t1 (id, data1, data2) VALUES (1, 1, 1);
INSERT INTO t1 (id, data1, data2) VALUES (2, 2, 2);
INSERT INTO t1 (id, data1, data2) VALUES (3, 3, 3);
INSERT INTO t1 (id, data1, data2) VALUES (4, 4, 4);
INSERT INTO t2 (id, data1, data2) VALUES (1, 2, 101);
INSERT INTO t2 (id, data1, data2) VALUES (2, 1, 102);
INSERT INTO t2 (id, data1, data2) VALUES (3, 4, 103);
INSERT INTO t2 (id, data1, data2) VALUES (4, 3, 104);
-- Now let's try an update that would require us to move info across segments
-- (depending upon exactly where the data is stored, which will vary depending
-- upon the number of segments; in my case, I used only 2 segments).
UPDATE t1 SET data2 = t2.data2 FROM t2 WHERE t1.data1 = t2.data1;
SELECT * from t1;
DROP TABLE IF EXISTS update_test;
DROP TABLE IF EXISTS t1;
DROP TABLE IF EXISTS t2;
--
-- @created 2009-01-27 14:00:00
-- @modified 2013-06-24 17:00:00
-- @tags ddl schema_topology
-- @description Joins
-- start_ignore
drop table if exists r;
psql:/path/sql_file:1: NOTICE: table "r" does not exist, skipping
DROP TABLE
drop table if exists m;
psql:/path/sql_file:1: NOTICE: table "m" does not exist, skipping
DROP TABLE
drop table if exists purchase_par cascade;
psql:/path/sql_file:1: NOTICE: table "purchase_par" does not exist, skipping
DROP TABLE
-- 3 tables: heap, master-only and partitioned table
-- end_ignore
create table r (a int, b int) distributed by (a);
CREATE TABLE
create table m ();
psql:/path/sql_file:1: NOTICE: Table has no attributes to distribute on.
CREATE TABLE
alter table m add column a int;
ALTER TABLE
alter table m add column b int;
ALTER TABLE
CREATE TABLE purchase_par (id int, year int, month int, day int, region text)
DISTRIBUTED BY (id)
PARTITION BY LIST (region)
( PARTITION usa VALUES ('south'),
PARTITION europe VALUES ('north'),
PARTITION asia VALUES ('east'),
DEFAULT PARTITION other_regions)
;
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_usa" for table "purchase_par"
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_europe" for table "purchase_par"
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_asia" for table "purchase_par"
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_other_regions" for table "purchase_par"
CREATE TABLE
insert into r select generate_series(1, 10000), generate_series(1, 10000) * 3;
INSERT 0 10000
insert into m select generate_series(1, 1000), generate_series(1, 1000) * 4;
INSERT 0 1000
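-- Illustration only (output omitted; it depends on the cluster layout): where the rows of m
-- actually landed can be checked with the gp_segment_id system column.
select gp_segment_id, count(*) from m group by 1;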
Insert into purchase_par values(1,2009,13,29,'east');
INSERT 0 1
Insert into purchase_par values(2,2009,13,29,'west');
INSERT 0 1
Insert into purchase_par values(3,2009,13,29,'north');
INSERT 0 1
Insert into purchase_par values(4,2009,13,29,'south');
INSERT 0 1
Insert into purchase_par values(5,2009,13,29,'east');
INSERT 0 1
Insert into purchase_par values(6,2009,13,29,'west');
INSERT 0 1
Insert into purchase_par values(7,2002,13,29,'north');
INSERT 0 1
Insert into purchase_par values(8,2003,13,29,'south');
INSERT 0 1
Insert into purchase_par values(9,2004,13,29,'east');
INSERT 0 1
Insert into purchase_par values(10,2005,13,29,'west');
INSERT 0 1
Insert into purchase_par values(11,2006,13,29,'north');
INSERT 0 1
Insert into purchase_par values(12,2007,01,29,'south');
INSERT 0 1
Insert into purchase_par values(13,2008,02,29,'east');
INSERT 0 1
Insert into purchase_par values(14,2002,03,29,'west');
INSERT 0 1
Insert into purchase_par values(15,2003,04,29,'north');
INSERT 0 1
Insert into purchase_par values(16,2004,05,29,'south');
INSERT 0 1
Insert into purchase_par values(17,2005,06,29,'east');
INSERT 0 1
Insert into purchase_par values(18,2006,07,29,'west');
INSERT 0 1
Insert into purchase_par values(19,2007,08,29,'north');
INSERT 0 1
Insert into purchase_par values(20,2008,09,29,'south');
INSERT 0 1
-- partitioned table: update --
select purchase_par.* from purchase_par where id in (select m.b from m, r where m.a = r.b) and day in (select a from r);
id | year | month | day | region
----+------+-------+-----+--------
12 | 2007 | 1 | 29 | south
(1 row)
update purchase_par set region = 'new_region' where id in (select m.b from m, r where m.a = r.b) and day in (select a from r);
psql:/path/sql_file:1: ERROR: moving tuple from partition "purchase_par_1_prt_usa" to partition "purchase_par_1_prt_other_regions" not supported (seg1 rh55-qavm74:7393 pid=30843)
select purchase_par.* from purchase_par where id in (select m.b from m, r where m.a = r.b) and day in (select a from r);
id | year | month | day | region
----+------+-------+-----+--------
12 | 2007 | 1 | 29 | south
(1 row)
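-- Illustration only (hypothetical workaround, output omitted): this Greenplum version
-- rejects an UPDATE that would move a row into a different partition, as the error above
-- shows. The usual alternative is to delete the row and re-insert it with the new
-- partition key value, for example:
begin;
delete from purchase_par where id = 12;
insert into purchase_par values(12, 2007, 1, 29, 'new_region');
commit;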
select purchase_par.* from purchase_par,m,r where purchase_par.id = m.b and purchase_par.month = r.b;
id | year | month | day | region
----+------+-------+-----+--------
20 | 2008 | 9 | 29 | south
(1 row)
update purchase_par set month = month+1 from r,m where purchase_par.id = m.b and purchase_par.month = r.b;
UPDATE 1
select purchase_par.* from purchase_par,m,r where purchase_par.id = m.b and purchase_par.month = r.b+1;
id | year | month | day | region
----+------+-------+-----+--------
20 | 2008 | 10 | 29 | south
8 | 2003 | 13 | 29 | south
4 | 2009 | 13 | 29 | south
(3 rows)
-- heap table: delete --
select * from r where b in (select month-1 from purchase_par,m where purchase_par.id = m.b);
a | b
---+----
3 | 9
4 | 12
(2 rows)
delete from r where b in (select month-1 from purchase_par,m where purchase_par.id = m.b);
DELETE 2
select * from r where b in (select month-1 from purchase_par,m where purchase_par.id = m.b);
a | b
---+---
(0 rows)
-- master-only table: update
select m.* from m,r,purchase_par where m.a = r.b and m.b = purchase_par.id;
a | b
---+----
3 | 12
(1 row)
delete from m using r,purchase_par where m.a = r.b and m.b = purchase_par.id;
DELETE 1
select m.* from m,r,purchase_par where m.a = r.b and m.b = purchase_par.id;
a | b
---+---
(0 rows)
select m.* from m,r,purchase_par where m.a = r.a and m.b = purchase_par.id;
a | b
---+----
1 | 4
2 | 8
5 | 20
(3 rows)
update m set b = m.b + 1 from r,purchase_par where m.a = r.a and m.b = purchase_par.id;
UPDATE 3
select m.* from m,r,purchase_par where m.a = r.a and m.b = purchase_par.id + 1;
a | b
---+----
1 | 5
5 | 21
2 | 9
(3 rows)
--Drop tables
drop table r;
DROP TABLE
drop table m;
DROP TABLE
drop table purchase_par cascade;
DROP TABLE
--
-- @created 2009-01-27 14:00:00
-- @modified 2013-06-24 17:00:00
-- @tags ddl schema_topology
-- @description Joins
-- start_ignore
drop table if exists r;
psql:/path/sql_file:1: NOTICE: table "r" does not exist, skipping
DROP TABLE
drop table if exists m;
psql:/path/sql_file:1: NOTICE: table "m" does not exist, skipping
DROP TABLE
drop table if exists purchase_par cascade;
psql:/path/sql_file:1: NOTICE: table "purchase_par" does not exist, skipping
DROP TABLE
-- 3 tables: heap, master-only and partitioned table
-- end_ignore
create table r (a int, b int) distributed by (a);
CREATE TABLE
create table m ();
psql:/path/sql_file:1: NOTICE: Table has no attributes to distribute on.
CREATE TABLE
alter table m add column a int;
ALTER TABLE
alter table m add column b int;
ALTER TABLE
CREATE TABLE purchase_par (id int, year int, month int, day int, region text)
DISTRIBUTED BY (id)
PARTITION BY LIST (region)
( PARTITION usa VALUES ('south'),
PARTITION europe VALUES ('north'),
PARTITION asia VALUES ('east'),
DEFAULT PARTITION other_regions)
;
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_usa" for table "purchase_par"
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_europe" for table "purchase_par"
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_asia" for table "purchase_par"
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_other_regions" for table "purchase_par"
CREATE TABLE
insert into r select generate_series(1, 10000), generate_series(1, 10000) * 3;
INSERT 0 10000
insert into m select generate_series(1, 1000), generate_series(1, 1000) * 4;
INSERT 0 1000
Insert into purchase_par values(1,2009,13,29,'east');
INSERT 0 1
Insert into purchase_par values(2,2009,13,29,'west');
INSERT 0 1
Insert into purchase_par values(3,2009,13,29,'north');
INSERT 0 1
Insert into purchase_par values(4,2009,13,29,'south');
INSERT 0 1
Insert into purchase_par values(5,2009,13,29,'east');
INSERT 0 1
Insert into purchase_par values(6,2009,13,29,'west');
INSERT 0 1
Insert into purchase_par values(7,2002,13,29,'north');
INSERT 0 1
Insert into purchase_par values(8,2003,13,29,'south');
INSERT 0 1
Insert into purchase_par values(9,2004,13,29,'east');
INSERT 0 1
Insert into purchase_par values(10,2005,13,29,'west');
INSERT 0 1
Insert into purchase_par values(11,2006,13,29,'north');
INSERT 0 1
Insert into purchase_par values(12,2007,01,29,'south');
INSERT 0 1
Insert into purchase_par values(13,2008,02,29,'east');
INSERT 0 1
Insert into purchase_par values(14,2002,03,29,'west');
INSERT 0 1
Insert into purchase_par values(15,2003,04,29,'north');
INSERT 0 1
Insert into purchase_par values(16,2004,05,29,'south');
INSERT 0 1
Insert into purchase_par values(17,2005,06,29,'east');
INSERT 0 1
Insert into purchase_par values(18,2006,07,29,'west');
INSERT 0 1
Insert into purchase_par values(19,2007,08,29,'north');
INSERT 0 1
Insert into purchase_par values(20,2008,09,29,'south');
INSERT 0 1
-- partitioned table: update --
select purchase_par.* from purchase_par where id in (select m.b from m, r where m.a = r.b) and day in (select a from r);
id | year | month | day | region
----+------+-------+-----+--------
12 | 2007 | 1 | 29 | south
(1 row)
update purchase_par set region = 'new_region' where id in (select m.b from m, r where m.a = r.b) and day in (select a from r);
psql:/path/sql_file:1: ERROR: moving tuple from partition "purchase_par_1_prt_usa" to partition "purchase_par_1_prt_other_regions" not supported (seg0 rh55-qavm67:10532 pid=17153)
select purchase_par.* from purchase_par where id in (select m.b from m, r where m.a = r.b) and day in (select a from r);
id | year | month | day | region
----+------+-------+-----+--------
12 | 2007 | 1 | 29 | south
(1 row)
select purchase_par.* from purchase_par,m,r where purchase_par.id = m.b and purchase_par.month = r.b;
id | year | month | day | region
----+------+-------+-----+--------
20 | 2008 | 9 | 29 | south
(1 row)
update purchase_par set month = month+1 from r,m where purchase_par.id = m.b and purchase_par.month = r.b;
UPDATE 1
select purchase_par.* from purchase_par,m,r where purchase_par.id = m.b and purchase_par.month = r.b+1;
id | year | month | day | region
----+------+-------+-----+--------
20 | 2008 | 10 | 29 | south
8 | 2003 | 13 | 29 | south
4 | 2009 | 13 | 29 | south
(3 rows)
-- heap table: delete --
select * from r where b in (select month-1 from purchase_par,m where purchase_par.id = m.b);
a | b
---+----
3 | 9
4 | 12
(2 rows)
delete from r where b in (select month-1 from purchase_par,m where purchase_par.id = m.b);
DELETE 2
select * from r where b in (select month-1 from purchase_par,m where purchase_par.id = m.b);
a | b
---+---
(0 rows)
-- master-only table: update
select m.* from m,r,purchase_par where m.a = r.b and m.b = purchase_par.id;
a | b
---+----
3 | 12
(1 row)
delete from m using r,purchase_par where m.a = r.b and m.b = purchase_par.id;
DELETE 1
select m.* from m,r,purchase_par where m.a = r.b and m.b = purchase_par.id;
a | b
---+---
(0 rows)
select m.* from m,r,purchase_par where m.a = r.a and m.b = purchase_par.id;
a | b
---+----
1 | 4
2 | 8
5 | 20
(3 rows)
update m set b = m.b + 1 from r,purchase_par where m.a = r.a and m.b = purchase_par.id;
UPDATE 3
select m.* from m,r,purchase_par where m.a = r.a and m.b = purchase_par.id + 1;
a | b
---+----
1 | 5
2 | 9
5 | 21
(3 rows)
--Drop tables
drop table r;
DROP TABLE
drop table m;
DROP TABLE
drop table purchase_par cascade;
DROP TABLE
-- start_ignore
-- end_ignore
--
-- @created 2009-01-27 14:00:00
-- @modified 2013-06-24 17:00:00
-- @tags ddl schema_topology
-- @description Joins
-- start_ignore
drop table if exists r;
psql:/path/sql_file:1: NOTICE: table "r" does not exist, skipping
DROP TABLE
drop table if exists m;
psql:/path/sql_file:1: NOTICE: table "m" does not exist, skipping
DROP TABLE
drop table if exists purchase_par cascade;
psql:/path/sql_file:1: NOTICE: table "purchase_par" does not exist, skipping
DROP TABLE
-- 3 tables: heap, master-only and partitioned table
-- end_ignore
create table r (a int, b int) distributed by (a);
CREATE TABLE
create table m ();
psql:/path/sql_file:1: NOTICE: Table has no attributes to distribute on.
CREATE TABLE
alter table m add column a int;
ALTER TABLE
alter table m add column b int;
ALTER TABLE
CREATE TABLE purchase_par (id int, year int, month int, day int, region text)
DISTRIBUTED BY (id)
PARTITION BY LIST (region)
( PARTITION usa VALUES ('south'),
PARTITION europe VALUES ('north'),
PARTITION asia VALUES ('east'),
DEFAULT PARTITION other_regions)
;
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_usa" for table "purchase_par"
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_europe" for table "purchase_par"
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_asia" for table "purchase_par"
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_other_regions" for table "purchase_par"
CREATE TABLE
insert into r select generate_series(1, 10000), generate_series(1, 10000) * 3;
INSERT 0 10000
insert into m select generate_series(1, 1000), generate_series(1, 1000) * 4;
INSERT 0 1000
Insert into purchase_par values(1,2009,13,29,'east');
INSERT 0 1
Insert into purchase_par values(2,2009,13,29,'west');
INSERT 0 1
Insert into purchase_par values(3,2009,13,29,'north');
INSERT 0 1
Insert into purchase_par values(4,2009,13,29,'south');
INSERT 0 1
Insert into purchase_par values(5,2009,13,29,'east');
INSERT 0 1
Insert into purchase_par values(6,2009,13,29,'west');
INSERT 0 1
Insert into purchase_par values(7,2002,13,29,'north');
INSERT 0 1
Insert into purchase_par values(8,2003,13,29,'south');
INSERT 0 1
Insert into purchase_par values(9,2004,13,29,'east');
INSERT 0 1
Insert into purchase_par values(10,2005,13,29,'west');
INSERT 0 1
Insert into purchase_par values(11,2006,13,29,'north');
INSERT 0 1
Insert into purchase_par values(12,2007,01,29,'south');
INSERT 0 1
Insert into purchase_par values(13,2008,02,29,'east');
INSERT 0 1
Insert into purchase_par values(14,2002,03,29,'west');
INSERT 0 1
Insert into purchase_par values(15,2003,04,29,'north');
INSERT 0 1
Insert into purchase_par values(16,2004,05,29,'south');
INSERT 0 1
Insert into purchase_par values(17,2005,06,29,'east');
INSERT 0 1
Insert into purchase_par values(18,2006,07,29,'west');
INSERT 0 1
Insert into purchase_par values(19,2007,08,29,'north');
INSERT 0 1
Insert into purchase_par values(20,2008,09,29,'south');
INSERT 0 1
-- partitioned table: update --
select purchase_par.* from purchase_par where id in (select m.b from m, r where m.a = r.b) and day in (select a from r);
id | year | month | day | region
----+------+-------+-----+--------
12 | 2007 | 1 | 29 | south
(1 row)
update purchase_par set region = 'new_region' where id in (select m.b from m, r where m.a = r.b) and day in (select a from r);
psql:/path/sql_file:1: ERROR: moving tuple from partition "purchase_par_1_prt_usa" to partition "purchase_par_1_prt_other_regions" not supported (seg5 sdw14:25105 pid=18355)
select purchase_par.* from purchase_par where id in (select m.b from m, r where m.a = r.b) and day in (select a from r);
id | year | month | day | region
----+------+-------+-----+--------
12 | 2007 | 1 | 29 | south
(1 row)
select purchase_par.* from purchase_par,m,r where purchase_par.id = m.b and purchase_par.month = r.b;
id | year | month | day | region
----+------+-------+-----+--------
20 | 2008 | 9 | 29 | south
(1 row)
update purchase_par set month = month+1 from r,m where purchase_par.id = m.b and purchase_par.month = r.b;
UPDATE 1
select purchase_par.* from purchase_par,m,r where purchase_par.id = m.b and purchase_par.month = r.b+1;
id | year | month | day | region
----+------+-------+-----+--------
20 | 2008 | 10 | 29 | south
8 | 2003 | 13 | 29 | south
4 | 2009 | 13 | 29 | south
(3 rows)
-- heap table: delete --
select * from r where b in (select month-1 from purchase_par,m where purchase_par.id = m.b);
a | b
---+----
3 | 9
4 | 12
(2 rows)
delete from r where b in (select month-1 from purchase_par,m where purchase_par.id = m.b);
DELETE 2
select * from r where b in (select month-1 from purchase_par,m where purchase_par.id = m.b);
a | b
---+---
(0 rows)
-- master-only table: update
select m.* from m,r,purchase_par where m.a = r.b and m.b = purchase_par.id;
a | b
---+----
3 | 12
(1 row)
delete from m using r,purchase_par where m.a = r.b and m.b = purchase_par.id;
DELETE 1
select m.* from m,r,purchase_par where m.a = r.b and m.b = purchase_par.id;
a | b
---+---
(0 rows)
select m.* from m,r,purchase_par where m.a = r.a and m.b = purchase_par.id;
a | b
---+----
2 | 8
1 | 4
5 | 20
(3 rows)
update m set b = m.b + 1 from r,purchase_par where m.a = r.a and m.b = purchase_par.id;
UPDATE 3
select m.* from m,r,purchase_par where m.a = r.a and m.b = purchase_par.id + 1;
a | b
---+----
2 | 9
1 | 5
5 | 21
(3 rows)
--Drop tables
drop table r;
DROP TABLE
drop table m;
DROP TABLE
drop table purchase_par cascade;
DROP TABLE
--
-- @created 2009-01-27 14:00:00
-- @modified 2013-06-24 17:00:00
-- @tags ddl schema_topology
-- @description Joins
-- start_ignore
drop table if exists r;
psql:/path/sql_file:1: NOTICE: table "r" does not exist, skipping
DROP TABLE
drop table if exists m;
psql:/path/sql_file:1: NOTICE: table "m" does not exist, skipping
DROP TABLE
drop table if exists purchase_par cascade;
psql:/path/sql_file:1: NOTICE: table "purchase_par" does not exist, skipping
DROP TABLE
-- 3 tables: heap, master-only and partitioned table
-- end_ignore
create table r (a int, b int) distributed by (a);
CREATE TABLE
create table m ();
psql:/path/sql_file:1: NOTICE: Table has no attributes to distribute on.
CREATE TABLE
alter table m add column a int;
ALTER TABLE
alter table m add column b int;
ALTER TABLE
CREATE TABLE purchase_par (id int, year int, month int, day int, region text)
DISTRIBUTED BY (id)
PARTITION BY LIST (region)
( PARTITION usa VALUES ('south'),
PARTITION europe VALUES ('north'),
PARTITION asia VALUES ('east'),
DEFAULT PARTITION other_regions)
;
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_usa" for table "purchase_par"
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_europe" for table "purchase_par"
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_asia" for table "purchase_par"
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_other_regions" for table "purchase_par"
CREATE TABLE
insert into r select generate_series(1, 10000), generate_series(1, 10000) * 3;
INSERT 0 10000
insert into m select generate_series(1, 1000), generate_series(1, 1000) * 4;
INSERT 0 1000
Insert into purchase_par values(1,2009,13,29,'east');
INSERT 0 1
Insert into purchase_par values(2,2009,13,29,'west');
INSERT 0 1
Insert into purchase_par values(3,2009,13,29,'north');
INSERT 0 1
Insert into purchase_par values(4,2009,13,29,'south');
INSERT 0 1
Insert into purchase_par values(5,2009,13,29,'east');
INSERT 0 1
Insert into purchase_par values(6,2009,13,29,'west');
INSERT 0 1
Insert into purchase_par values(7,2002,13,29,'north');
INSERT 0 1
Insert into purchase_par values(8,2003,13,29,'south');
INSERT 0 1
Insert into purchase_par values(9,2004,13,29,'east');
INSERT 0 1
Insert into purchase_par values(10,2005,13,29,'west');
INSERT 0 1
Insert into purchase_par values(11,2006,13,29,'north');
INSERT 0 1
Insert into purchase_par values(12,2007,01,29,'south');
INSERT 0 1
Insert into purchase_par values(13,2008,02,29,'east');
INSERT 0 1
Insert into purchase_par values(14,2002,03,29,'west');
INSERT 0 1
Insert into purchase_par values(15,2003,04,29,'north');
INSERT 0 1
Insert into purchase_par values(16,2004,05,29,'south');
INSERT 0 1
Insert into purchase_par values(17,2005,06,29,'east');
INSERT 0 1
Insert into purchase_par values(18,2006,07,29,'west');
INSERT 0 1
Insert into purchase_par values(19,2007,08,29,'north');
INSERT 0 1
Insert into purchase_par values(20,2008,09,29,'south');
INSERT 0 1
-- partitioned table: update --
select purchase_par.* from purchase_par where id in (select m.b from m, r where m.a = r.b) and day in (select a from r);
id | year | month | day | region
----+------+-------+-----+--------
12 | 2007 | 1 | 29 | south
(1 row)
update purchase_par set region = 'new_region' where id in (select m.b from m, r where m.a = r.b) and day in (select a from r);
psql:/path/sql_file:1: ERROR: moving tuple from partition "purchase_par_1_prt_usa" to partition "purchase_par_1_prt_other_regions" not supported (seg1 rh55-qavm74:7393 pid=30843)
select purchase_par.* from purchase_par where id in (select m.b from m, r where m.a = r.b) and day in (select a from r);
id | year | month | day | region
----+------+-------+-----+--------
12 | 2007 | 1 | 29 | south
(1 row)
select purchase_par.* from purchase_par,m,r where purchase_par.id = m.b and purchase_par.month = r.b;
id | year | month | day | region
----+------+-------+-----+--------
20 | 2008 | 9 | 29 | south
(1 row)
update purchase_par set month = month+1 from r,m where purchase_par.id = m.b and purchase_par.month = r.b;
UPDATE 1
select purchase_par.* from purchase_par,m,r where purchase_par.id = m.b and purchase_par.month = r.b+1;
id | year | month | day | region
----+------+-------+-----+--------
20 | 2008 | 10 | 29 | south
8 | 2003 | 13 | 29 | south
4 | 2009 | 13 | 29 | south
(3 rows)
-- heap table: delete --
select * from r where b in (select month-1 from purchase_par,m where purchase_par.id = m.b);
a | b
---+----
3 | 9
4 | 12
(2 rows)
delete from r where b in (select month-1 from purchase_par,m where purchase_par.id = m.b);
DELETE 2
select * from r where b in (select month-1 from purchase_par,m where purchase_par.id = m.b);
a | b
---+---
(0 rows)
-- master-only table: update
select m.* from m,r,purchase_par where m.a = r.b and m.b = purchase_par.id;
a | b
---+----
3 | 12
(1 row)
delete from m using r,purchase_par where m.a = r.b and m.b = purchase_par.id;
DELETE 1
select m.* from m,r,purchase_par where m.a = r.b and m.b = purchase_par.id;
a | b
---+---
(0 rows)
select m.* from m,r,purchase_par where m.a = r.a and m.b = purchase_par.id;
a | b
---+----
1 | 4
2 | 8
5 | 20
(3 rows)
update m set b = m.b + 1 from r,purchase_par where m.a = r.a and m.b = purchase_par.id;
UPDATE 3
select m.* from m,r,purchase_par where m.a = r.a and m.b = purchase_par.id + 1;
a | b
---+----
1 | 5
5 | 21
2 | 9
(3 rows)
--Drop tables
drop table r;
DROP TABLE
drop table m;
DROP TABLE
drop table purchase_par cascade;
DROP TABLE
--
-- @created 2009-01-27 14:00:00
-- @modified 2013-06-24 17:00:00
-- @tags ddl schema_topology
-- @description Joins
-- start_ignore
drop table if exists r;
psql:/path/sql_file:1: NOTICE: table "r" does not exist, skipping
DROP TABLE
drop table if exists m;
psql:/path/sql_file:1: NOTICE: table "m" does not exist, skipping
DROP TABLE
drop table if exists purchase_par cascade;
psql:/path/sql_file:1: NOTICE: table "purchase_par" does not exist, skipping
DROP TABLE
-- 3 tables: heap, master-only and partitioned table
-- end_ignore
create table r (a int, b int) distributed by (a);
CREATE TABLE
create table m ();
psql:/path/sql_file:1: NOTICE: Table has no attributes to distribute on.
CREATE TABLE
alter table m add column a int;
ALTER TABLE
alter table m add column b int;
ALTER TABLE
CREATE TABLE purchase_par (id int, year int, month int, day int, region text)
DISTRIBUTED BY (id)
PARTITION BY LIST (region)
( PARTITION usa VALUES ('south'),
PARTITION europe VALUES ('north'),
PARTITION asia VALUES ('east'),
DEFAULT PARTITION other_regions)
;
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_usa" for table "purchase_par"
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_europe" for table "purchase_par"
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_asia" for table "purchase_par"
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_other_regions" for table "purchase_par"
CREATE TABLE
insert into r select generate_series(1, 10000), generate_series(1, 10000) * 3;
INSERT 0 10000
insert into m select generate_series(1, 1000), generate_series(1, 1000) * 4;
INSERT 0 1000
Insert into purchase_par values(1,2009,13,29,'east');
INSERT 0 1
Insert into purchase_par values(2,2009,13,29,'west');
INSERT 0 1
Insert into purchase_par values(3,2009,13,29,'north');
INSERT 0 1
Insert into purchase_par values(4,2009,13,29,'south');
INSERT 0 1
Insert into purchase_par values(5,2009,13,29,'east');
INSERT 0 1
Insert into purchase_par values(6,2009,13,29,'west');
INSERT 0 1
Insert into purchase_par values(7,2002,13,29,'north');
INSERT 0 1
Insert into purchase_par values(8,2003,13,29,'south');
INSERT 0 1
Insert into purchase_par values(9,2004,13,29,'east');
INSERT 0 1
Insert into purchase_par values(10,2005,13,29,'west');
INSERT 0 1
Insert into purchase_par values(11,2006,13,29,'north');
INSERT 0 1
Insert into purchase_par values(12,2007,01,29,'south');
INSERT 0 1
Insert into purchase_par values(13,2008,02,29,'east');
INSERT 0 1
Insert into purchase_par values(14,2002,03,29,'west');
INSERT 0 1
Insert into purchase_par values(15,2003,04,29,'north');
INSERT 0 1
Insert into purchase_par values(16,2004,05,29,'south');
INSERT 0 1
Insert into purchase_par values(17,2005,06,29,'east');
INSERT 0 1
Insert into purchase_par values(18,2006,07,29,'west');
INSERT 0 1
Insert into purchase_par values(19,2007,08,29,'north');
INSERT 0 1
Insert into purchase_par values(20,2008,09,29,'south');
INSERT 0 1
-- partitioned table: update --
select purchase_par.* from purchase_par where id in (select m.b from m, r where m.a = r.b) and day in (select a from r);
id | year | month | day | region
----+------+-------+-----+--------
12 | 2007 | 1 | 29 | south
(1 row)
update purchase_par set region = 'new_region' where id in (select m.b from m, r where m.a = r.b) and day in (select a from r);
psql:/path/sql_file:1: ERROR: moving tuple from partition "purchase_par_1_prt_usa" to partition "purchase_par_1_prt_other_regions" not supported (seg1 rh55-qavm68:7656 pid=16904)
select purchase_par.* from purchase_par where id in (select m.b from m, r where m.a = r.b) and day in (select a from r);
id | year | month | day | region
----+------+-------+-----+--------
12 | 2007 | 1 | 29 | south
(1 row)
select purchase_par.* from purchase_par,m,r where purchase_par.id = m.b and purchase_par.month = r.b;
id | year | month | day | region
----+------+-------+-----+--------
20 | 2008 | 9 | 29 | south
(1 row)
update purchase_par set month = month+1 from r,m where purchase_par.id = m.b and purchase_par.month = r.b;
UPDATE 1
select purchase_par.* from purchase_par,m,r where purchase_par.id = m.b and purchase_par.month = r.b+1;
id | year | month | day | region
----+------+-------+-----+--------
20 | 2008 | 10 | 29 | south
8 | 2003 | 13 | 29 | south
4 | 2009 | 13 | 29 | south
(3 rows)
-- heap table: delete --
select * from r where b in (select month-1 from purchase_par,m where purchase_par.id = m.b);
a | b
---+----
3 | 9
4 | 12
(2 rows)
delete from r where b in (select month-1 from purchase_par,m where purchase_par.id = m.b);
DELETE 2
select * from r where b in (select month-1 from purchase_par,m where purchase_par.id = m.b);
a | b
---+---
(0 rows)
-- master-only table: update
select m.* from m,r,purchase_par where m.a = r.b and m.b = purchase_par.id;
a | b
---+----
3 | 12
(1 row)
delete from m using r,purchase_par where m.a = r.b and m.b = purchase_par.id;
DELETE 1
select m.* from m,r,purchase_par where m.a = r.b and m.b = purchase_par.id;
a | b
---+---
(0 rows)
select m.* from m,r,purchase_par where m.a = r.a and m.b = purchase_par.id;
a | b
---+----
1 | 4
2 | 8
5 | 20
(3 rows)
update m set b = m.b + 1 from r,purchase_par where m.a = r.a and m.b = purchase_par.id;
UPDATE 3
select m.* from m,r,purchase_par where m.a = r.a and m.b = purchase_par.id + 1;
a | b
---+----
1 | 5
2 | 9
5 | 21
(3 rows)
--Drop tables
drop table r;
DROP TABLE
drop table m;
DROP TABLE
drop table purchase_par cascade;
DROP TABLE
--
-- @created 2009-01-27 14:00:00
-- @modified 2013-06-24 17:00:00
-- @tags ddl schema_topology
-- @description Joins
-- start_ignore
drop table if exists r;
psql:/path/sql_file:1: NOTICE: table "r" does not exist, skipping
DROP TABLE
drop table if exists m;
psql:/path/sql_file:1: NOTICE: table "m" does not exist, skipping
DROP TABLE
drop table if exists purchase_par cascade;
psql:/path/sql_file:1: NOTICE: table "purchase_par" does not exist, skipping
DROP TABLE
-- 3 tables: heap, master-only and partitioned table
-- end_ignore
create table r (a int, b int) distributed by (a);
CREATE TABLE
create table m ();
psql:/path/sql_file:1: NOTICE: Table has no attributes to distribute on.
CREATE TABLE
alter table m add column a int;
ALTER TABLE
alter table m add column b int;
ALTER TABLE
CREATE TABLE purchase_par (id int, year int, month int, day int, region text)
DISTRIBUTED BY (id)
PARTITION BY LIST (region)
( PARTITION usa VALUES ('south'),
PARTITION europe VALUES ('north'),
PARTITION asia VALUES ('east'),
DEFAULT PARTITION other_regions)
;
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_usa" for table "purchase_par"
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_europe" for table "purchase_par"
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_asia" for table "purchase_par"
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_other_regions" for table "purchase_par"
CREATE TABLE
insert into r select generate_series(1, 10000), generate_series(1, 10000) * 3;
INSERT 0 10000
insert into m select generate_series(1, 1000), generate_series(1, 1000) * 4;
INSERT 0 1000
Insert into purchase_par values(1,2009,13,29,'east');
INSERT 0 1
Insert into purchase_par values(2,2009,13,29,'west');
INSERT 0 1
Insert into purchase_par values(3,2009,13,29,'north');
INSERT 0 1
Insert into purchase_par values(4,2009,13,29,'south');
INSERT 0 1
Insert into purchase_par values(5,2009,13,29,'east');
INSERT 0 1
Insert into purchase_par values(6,2009,13,29,'west');
INSERT 0 1
Insert into purchase_par values(7,2002,13,29,'north');
INSERT 0 1
Insert into purchase_par values(8,2003,13,29,'south');
INSERT 0 1
Insert into purchase_par values(9,2004,13,29,'east');
INSERT 0 1
Insert into purchase_par values(10,2005,13,29,'west');
INSERT 0 1
Insert into purchase_par values(11,2006,13,29,'north');
INSERT 0 1
Insert into purchase_par values(12,2007,01,29,'south');
INSERT 0 1
Insert into purchase_par values(13,2008,02,29,'east');
INSERT 0 1
Insert into purchase_par values(14,2002,03,29,'west');
INSERT 0 1
Insert into purchase_par values(15,2003,04,29,'north');
INSERT 0 1
Insert into purchase_par values(16,2004,05,29,'south');
INSERT 0 1
Insert into purchase_par values(17,2005,06,29,'east');
INSERT 0 1
Insert into purchase_par values(18,2006,07,29,'west');
INSERT 0 1
Insert into purchase_par values(19,2007,08,29,'north');
INSERT 0 1
Insert into purchase_par values(20,2008,09,29,'south');
INSERT 0 1
-- partitioned table: update --
select purchase_par.* from purchase_par where id in (select m.b from m, r where m.a = r.b) and day in (select a from r);
id | year | month | day | region
----+------+-------+-----+--------
12 | 2007 | 1 | 29 | south
(1 row)
update purchase_par set region = 'new_region' where id in (select m.b from m, r where m.a = r.b) and day in (select a from r);
psql:/path/sql_file:1: ERROR: moving tuple from partition "purchase_par_1_prt_usa" to partition "purchase_par_1_prt_other_regions" not supported (seg5 rh55-qavm70:25656 pid=29172)
select purchase_par.* from purchase_par where id in (select m.b from m, r where m.a = r.b) and day in (select a from r);
id | year | month | day | region
----+------+-------+-----+--------
12 | 2007 | 1 | 29 | south
(1 row)
select purchase_par.* from purchase_par,m,r where purchase_par.id = m.b and purchase_par.month = r.b;
id | year | month | day | region
----+------+-------+-----+--------
20 | 2008 | 9 | 29 | south
(1 row)
update purchase_par set month = month+1 from r,m where purchase_par.id = m.b and purchase_par.month = r.b;
UPDATE 1
select purchase_par.* from purchase_par,m,r where purchase_par.id = m.b and purchase_par.month = r.b+1;
id | year | month | day | region
----+------+-------+-----+--------
8 | 2003 | 13 | 29 | south
4 | 2009 | 13 | 29 | south
20 | 2008 | 10 | 29 | south
(3 rows)
-- heap table: delete --
select * from r where b in (select month-1 from purchase_par,m where purchase_par.id = m.b);
a | b
---+----
4 | 12
3 | 9
(2 rows)
delete from r where b in (select month-1 from purchase_par,m where purchase_par.id = m.b);
DELETE 2
select * from r where b in (select month-1 from purchase_par,m where purchase_par.id = m.b);
a | b
---+---
(0 rows)
-- master-only table: update
select m.* from m,r,purchase_par where m.a = r.b and m.b = purchase_par.id;
a | b
---+----
3 | 12
(1 row)
delete from m using r,purchase_par where m.a = r.b and m.b = purchase_par.id;
DELETE 1
select m.* from m,r,purchase_par where m.a = r.b and m.b = purchase_par.id;
a | b
---+---
(0 rows)
select m.* from m,r,purchase_par where m.a = r.a and m.b = purchase_par.id;
a | b
---+----
1 | 4
5 | 20
2 | 8
(3 rows)
update m set b = m.b + 1 from r,purchase_par where m.a = r.a and m.b = purchase_par.id;
UPDATE 3
select m.* from m,r,purchase_par where m.a = r.a and m.b = purchase_par.id + 1;
a | b
---+----
2 | 9
1 | 5
5 | 21
(3 rows)
--Drop tables
drop table r;
DROP TABLE
drop table m;
DROP TABLE
drop table purchase_par cascade;
DROP TABLE
-- start_ignore
SET optimizer=on;
SET
SET optimizer_log=on;
SET
-- end_ignore
--
-- @created 2009-01-27 14:00:00
-- @modified 2013-06-24 17:00:00
-- @tags ddl schema_topology
-- @description Joins
-- start_ignore
drop table if exists r;
psql:/path/sql_file:1: NOTICE: table "r" does not exist, skipping
DROP TABLE
drop table if exists m;
psql:/path/sql_file:1: NOTICE: table "m" does not exist, skipping
DROP TABLE
drop table if exists purchase_par cascade;
psql:/path/sql_file:1: NOTICE: table "purchase_par" does not exist, skipping
DROP TABLE
-- 3 tables: heap, master-only and partitioned table
-- end_ignore
create table r (a int, b int) distributed by (a);
CREATE TABLE
create table m ();
psql:/path/sql_file:1: NOTICE: Table has no attributes to distribute on.
CREATE TABLE
alter table m add column a int;
ALTER TABLE
alter table m add column b int;
ALTER TABLE
CREATE TABLE purchase_par (id int, year int, month int, day int, region text)
DISTRIBUTED BY (id)
PARTITION BY LIST (region)
( PARTITION usa VALUES ('south'),
PARTITION europe VALUES ('north'),
PARTITION asia VALUES ('east'),
DEFAULT PARTITION other_regions)
;
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_usa" for table "purchase_par"
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_europe" for table "purchase_par"
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_asia" for table "purchase_par"
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_other_regions" for table "purchase_par"
CREATE TABLE
insert into r select generate_series(1, 10000), generate_series(1, 10000) * 3;
INSERT 0 10000
insert into m select generate_series(1, 1000), generate_series(1, 1000) * 4;
INSERT 0 1000
Insert into purchase_par values(1,2009,13,29,'east');
INSERT 0 1
Insert into purchase_par values(2,2009,13,29,'west');
INSERT 0 1
Insert into purchase_par values(3,2009,13,29,'north');
INSERT 0 1
Insert into purchase_par values(4,2009,13,29,'south');
INSERT 0 1
Insert into purchase_par values(5,2009,13,29,'east');
INSERT 0 1
Insert into purchase_par values(6,2009,13,29,'west');
INSERT 0 1
Insert into purchase_par values(7,2002,13,29,'north');
INSERT 0 1
Insert into purchase_par values(8,2003,13,29,'south');
INSERT 0 1
Insert into purchase_par values(9,2004,13,29,'east');
INSERT 0 1
Insert into purchase_par values(10,2005,13,29,'west');
INSERT 0 1
Insert into purchase_par values(11,2006,13,29,'north');
INSERT 0 1
Insert into purchase_par values(12,2007,01,29,'south');
INSERT 0 1
Insert into purchase_par values(13,2008,02,29,'east');
INSERT 0 1
Insert into purchase_par values(14,2002,03,29,'west');
INSERT 0 1
Insert into purchase_par values(15,2003,04,29,'north');
INSERT 0 1
Insert into purchase_par values(16,2004,05,29,'south');
INSERT 0 1
Insert into purchase_par values(17,2005,06,29,'east');
INSERT 0 1
Insert into purchase_par values(18,2006,07,29,'west');
INSERT 0 1
Insert into purchase_par values(19,2007,08,29,'north');
INSERT 0 1
Insert into purchase_par values(20,2008,09,29,'south');
INSERT 0 1
-- partitioned table: update --
select purchase_par.* from purchase_par where id in (select m.b from m, r where m.a = r.b) and day in (select a from r);
id | year | month | day | region
----+------+-------+-----+--------
12 | 2007 | 1 | 29 | south
(1 row)
update purchase_par set region = 'new_region' where id in (select m.b from m, r where m.a = r.b) and day in (select a from r);
UPDATE 1
select purchase_par.* from purchase_par where id in (select m.b from m, r where m.a = r.b) and day in (select a from r);
id | year | month | day | region
----+------+-------+-----+------------
12 | 2007 | 1 | 29 | new_region
(1 row)
select purchase_par.* from purchase_par,m,r where purchase_par.id = m.b and purchase_par.month = r.b;
id | year | month | day | region
----+------+-------+-----+--------
20 | 2008 | 9 | 29 | south
(1 row)
update purchase_par set month = month+1 from r,m where purchase_par.id = m.b and purchase_par.month = r.b;
UPDATE 1
select purchase_par.* from purchase_par,m,r where purchase_par.id = m.b and purchase_par.month = r.b+1;
id | year | month | day | region
----+------+-------+-----+--------
20 | 2008 | 10 | 29 | south
8 | 2003 | 13 | 29 | south
4 | 2009 | 13 | 29 | south
(3 rows)
-- heap table: delete --
select * from r where b in (select month-1 from purchase_par,m where purchase_par.id = m.b);
a | b
---+----
3 | 9
4 | 12
(2 rows)
delete from r where b in (select month-1 from purchase_par,m where purchase_par.id = m.b);
DELETE 2
select * from r where b in (select month-1 from purchase_par,m where purchase_par.id = m.b);
a | b
---+---
(0 rows)
-- master-only table: update
select m.* from m,r,purchase_par where m.a = r.b and m.b = purchase_par.id;
a | b
---+----
3 | 12
(1 row)
delete from m using r,purchase_par where m.a = r.b and m.b = purchase_par.id;
DELETE 1
select m.* from m,r,purchase_par where m.a = r.b and m.b = purchase_par.id;
a | b
---+---
(0 rows)
select m.* from m,r,purchase_par where m.a = r.a and m.b = purchase_par.id;
a | b
---+----
2 | 8
1 | 4
5 | 20
(3 rows)
update m set b = m.b + 1 from r,purchase_par where m.a = r.a and m.b = purchase_par.id;
UPDATE 3
select m.* from m,r,purchase_par where m.a = r.a and m.b = purchase_par.id + 1;
a | b
---+----
1 | 5
5 | 21
2 | 9
(3 rows)
--Drop tables
drop table r;
DROP TABLE
drop table m;
DROP TABLE
drop table purchase_par cascade;
DROP TABLE
-- start_ignore
-- end_ignore
--
-- @created 2009-01-27 14:00:00
-- @modified 2013-06-24 17:00:00
-- @tags ddl schema_topology
-- @description Joins
-- start_ignore
drop table if exists r;
psql:/path/sql_file:1: NOTICE: table "r" does not exist, skipping
DROP TABLE
drop table if exists m;
psql:/path/sql_file:1: NOTICE: table "m" does not exist, skipping
DROP TABLE
drop table if exists purchase_par cascade;
psql:/path/sql_file:1: NOTICE: table "purchase_par" does not exist, skipping
DROP TABLE
-- 3 tables: heap, master-only and partitioned table
-- end_ignore
create table r (a int, b int) distributed by (a);
CREATE TABLE
create table m ();
psql:/path/sql_file:1: NOTICE: Table has no attributes to distribute on.
CREATE TABLE
alter table m add column a int;
ALTER TABLE
alter table m add column b int;
ALTER TABLE
CREATE TABLE purchase_par (id int, year int, month int, day int, region text)
DISTRIBUTED BY (id)
PARTITION BY LIST (region)
( PARTITION usa VALUES ('south'),
PARTITION europe VALUES ('north'),
PARTITION asia VALUES ('east'),
DEFAULT PARTITION other_regions)
;
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_usa" for table "purchase_par"
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_europe" for table "purchase_par"
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_asia" for table "purchase_par"
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_other_regions" for table "purchase_par"
CREATE TABLE
insert into r select generate_series(1, 10000), generate_series(1, 10000) * 3;
INSERT 0 10000
insert into m select generate_series(1, 1000), generate_series(1, 1000) * 4;
INSERT 0 1000
Insert into purchase_par values(1,2009,13,29,'east');
INSERT 0 1
Insert into purchase_par values(2,2009,13,29,'west');
INSERT 0 1
Insert into purchase_par values(3,2009,13,29,'north');
INSERT 0 1
Insert into purchase_par values(4,2009,13,29,'south');
INSERT 0 1
Insert into purchase_par values(5,2009,13,29,'east');
INSERT 0 1
Insert into purchase_par values(6,2009,13,29,'west');
INSERT 0 1
Insert into purchase_par values(7,2002,13,29,'north');
INSERT 0 1
Insert into purchase_par values(8,2003,13,29,'south');
INSERT 0 1
Insert into purchase_par values(9,2004,13,29,'east');
INSERT 0 1
Insert into purchase_par values(10,2005,13,29,'west');
INSERT 0 1
Insert into purchase_par values(11,2006,13,29,'north');
INSERT 0 1
Insert into purchase_par values(12,2007,01,29,'south');
INSERT 0 1
Insert into purchase_par values(13,2008,02,29,'east');
INSERT 0 1
Insert into purchase_par values(14,2002,03,29,'west');
INSERT 0 1
Insert into purchase_par values(15,2003,04,29,'north');
INSERT 0 1
Insert into purchase_par values(16,2004,05,29,'south');
INSERT 0 1
Insert into purchase_par values(17,2005,06,29,'east');
INSERT 0 1
Insert into purchase_par values(18,2006,07,29,'west');
INSERT 0 1
Insert into purchase_par values(19,2007,08,29,'north');
INSERT 0 1
Insert into purchase_par values(20,2008,09,29,'south');
INSERT 0 1
-- partitioned table: update --
select purchase_par.* from purchase_par where id in (select m.b from m, r where m.a = r.b) and day in (select a from r);
id | year | month | day | region
----+------+-------+-----+--------
12 | 2007 | 1 | 29 | south
(1 row)
update purchase_par set region = 'new_region' where id in (select m.b from m, r where m.a = r.b) and day in (select a from r);
UPDATE 1
select purchase_par.* from purchase_par where id in (select m.b from m, r where m.a = r.b) and day in (select a from r);
id | year | month | day | region
----+------+-------+-----+------------
12 | 2007 | 1 | 29 | new_region
(1 row)
select purchase_par.* from purchase_par,m,r where purchase_par.id = m.b and purchase_par.month = r.b;
id | year | month | day | region
----+------+-------+-----+--------
20 | 2008 | 9 | 29 | south
(1 row)
update purchase_par set month = month+1 from r,m where purchase_par.id = m.b and purchase_par.month = r.b;
UPDATE 1
select purchase_par.* from purchase_par,m,r where purchase_par.id = m.b and purchase_par.month = r.b+1;
id | year | month | day | region
----+------+-------+-----+--------
20 | 2008 | 10 | 29 | south
8 | 2003 | 13 | 29 | south
4 | 2009 | 13 | 29 | south
(3 rows)
-- heap table: delete --
select * from r where b in (select month-1 from purchase_par,m where purchase_par.id = m.b);
a | b
---+----
3 | 9
4 | 12
(2 rows)
delete from r where b in (select month-1 from purchase_par,m where purchase_par.id = m.b);
DELETE 2
select * from r where b in (select month-1 from purchase_par,m where purchase_par.id = m.b);
a | b
---+---
(0 rows)
-- master-only table: update
select m.* from m,r,purchase_par where m.a = r.b and m.b = purchase_par.id;
a | b
---+----
3 | 12
(1 row)
delete from m using r,purchase_par where m.a = r.b and m.b = purchase_par.id;
DELETE 1
select m.* from m,r,purchase_par where m.a = r.b and m.b = purchase_par.id;
a | b
---+---
(0 rows)
select m.* from m,r,purchase_par where m.a = r.a and m.b = purchase_par.id;
a | b
---+----
1 | 4
2 | 8
5 | 20
(3 rows)
update m set b = m.b + 1 from r,purchase_par where m.a = r.a and m.b = purchase_par.id;
UPDATE 3
select m.* from m,r,purchase_par where m.a = r.a and m.b = purchase_par.id + 1;
a | b
---+----
1 | 5
2 | 9
5 | 21
(3 rows)
--Drop tables
drop table r;
DROP TABLE
drop table m;
DROP TABLE
drop table purchase_par cascade;
DROP TABLE
-- start_ignore
-- end_ignore
--
-- @created 2009-01-27 14:00:00
-- @modified 2013-06-24 17:00:00
-- @tags ddl schema_topology
-- @description Joins
-- start_ignore
drop table if exists r;
psql:/path/sql_file:1: NOTICE: table "r" does not exist, skipping
DROP TABLE
drop table if exists m;
psql:/path/sql_file:1: NOTICE: table "m" does not exist, skipping
DROP TABLE
drop table if exists purchase_par cascade;
psql:/path/sql_file:1: NOTICE: table "purchase_par" does not exist, skipping
DROP TABLE
-- 3 tables: heap, master-only and partitioned table
-- end_ignore
create table r (a int, b int) distributed by (a);
CREATE TABLE
create table m ();
psql:/path/sql_file:1: NOTICE: Table has no attributes to distribute on.
CREATE TABLE
alter table m add column a int;
ALTER TABLE
alter table m add column b int;
ALTER TABLE
CREATE TABLE purchase_par (id int, year int, month int, day int, region text)
DISTRIBUTED BY (id)
PARTITION BY LIST (region)
( PARTITION usa VALUES ('south'),
PARTITION europe VALUES ('north'),
PARTITION asia VALUES ('east'),
DEFAULT PARTITION other_regions)
;
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_usa" for table "purchase_par"
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_europe" for table "purchase_par"
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_asia" for table "purchase_par"
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_other_regions" for table "purchase_par"
CREATE TABLE
insert into r select generate_series(1, 10000), generate_series(1, 10000) * 3;
INSERT 0 10000
insert into m select generate_series(1, 1000), generate_series(1, 1000) * 4;
INSERT 0 1000
Insert into purchase_par values(1,2009,13,29,'east');
INSERT 0 1
Insert into purchase_par values(2,2009,13,29,'west');
INSERT 0 1
Insert into purchase_par values(3,2009,13,29,'north');
INSERT 0 1
Insert into purchase_par values(4,2009,13,29,'south');
INSERT 0 1
Insert into purchase_par values(5,2009,13,29,'east');
INSERT 0 1
Insert into purchase_par values(6,2009,13,29,'west');
INSERT 0 1
Insert into purchase_par values(7,2002,13,29,'north');
INSERT 0 1
Insert into purchase_par values(8,2003,13,29,'south');
INSERT 0 1
Insert into purchase_par values(9,2004,13,29,'east');
INSERT 0 1
Insert into purchase_par values(10,2005,13,29,'west');
INSERT 0 1
Insert into purchase_par values(11,2006,13,29,'north');
INSERT 0 1
Insert into purchase_par values(12,2007,01,29,'south');
INSERT 0 1
Insert into purchase_par values(13,2008,02,29,'east');
INSERT 0 1
Insert into purchase_par values(14,2002,03,29,'west');
INSERT 0 1
Insert into purchase_par values(15,2003,04,29,'north');
INSERT 0 1
Insert into purchase_par values(16,2004,05,29,'south');
INSERT 0 1
Insert into purchase_par values(17,2005,06,29,'east');
INSERT 0 1
Insert into purchase_par values(18,2006,07,29,'west');
INSERT 0 1
Insert into purchase_par values(19,2007,08,29,'north');
INSERT 0 1
Insert into purchase_par values(20,2008,09,29,'south');
INSERT 0 1
-- partitioned table: update --
select purchase_par.* from purchase_par where id in (select m.b from m, r where m.a = r.b) and day in (select a from r);
id | year | month | day | region
----+------+-------+-----+--------
12 | 2007 | 1 | 29 | south
(1 row)
update purchase_par set region = 'new_region' where id in (select m.b from m, r where m.a = r.b) and day in (select a from r);
UPDATE 1
select purchase_par.* from purchase_par where id in (select m.b from m, r where m.a = r.b) and day in (select a from r);
id | year | month | day | region
----+------+-------+-----+------------
12 | 2007 | 1 | 29 | new_region
(1 row)
select purchase_par.* from purchase_par,m,r where purchase_par.id = m.b and purchase_par.month = r.b;
id | year | month | day | region
----+------+-------+-----+--------
20 | 2008 | 9 | 29 | south
(1 row)
update purchase_par set month = month+1 from r,m where purchase_par.id = m.b and purchase_par.month = r.b;
UPDATE 1
select purchase_par.* from purchase_par,m,r where purchase_par.id = m.b and purchase_par.month = r.b+1;
id | year | month | day | region
----+------+-------+-----+--------
20 | 2008 | 10 | 29 | south
8 | 2003 | 13 | 29 | south
4 | 2009 | 13 | 29 | south
(3 rows)
-- heap table: delete --
select * from r where b in (select month-1 from purchase_par,m where purchase_par.id = m.b);
a | b
---+----
4 | 12
3 | 9
(2 rows)
delete from r where b in (select month-1 from purchase_par,m where purchase_par.id = m.b);
DELETE 2
select * from r where b in (select month-1 from purchase_par,m where purchase_par.id = m.b);
a | b
---+---
(0 rows)
-- master-only table: update
select m.* from m,r,purchase_par where m.a = r.b and m.b = purchase_par.id;
a | b
---+----
3 | 12
(1 row)
delete from m using r,purchase_par where m.a = r.b and m.b = purchase_par.id;
DELETE 1
select m.* from m,r,purchase_par where m.a = r.b and m.b = purchase_par.id;
a | b
---+---
(0 rows)
select m.* from m,r,purchase_par where m.a = r.a and m.b = purchase_par.id;
a | b
---+----
2 | 8
1 | 4
5 | 20
(3 rows)
update m set b = m.b + 1 from r,purchase_par where m.a = r.a and m.b = purchase_par.id;
UPDATE 3
select m.* from m,r,purchase_par where m.a = r.a and m.b = purchase_par.id + 1;
a | b
---+----
2 | 9
1 | 5
5 | 21
(3 rows)
--Drop tables
drop table r;
DROP TABLE
drop table m;
DROP TABLE
drop table purchase_par cascade;
DROP TABLE
-- start_ignore
-- end_ignore
--
-- @created 2009-01-27 14:00:00
-- @modified 2013-06-24 17:00:00
-- @tags ddl schema_topology
-- @description Joins
-- start_ignore
drop table if exists r;
psql:/path/sql_file:1: NOTICE: table "r" does not exist, skipping
DROP TABLE
drop table if exists m;
psql:/path/sql_file:1: NOTICE: table "m" does not exist, skipping
DROP TABLE
drop table if exists purchase_par cascade;
psql:/path/sql_file:1: NOTICE: table "purchase_par" does not exist, skipping
DROP TABLE
-- 3 tables: heap, master-only and partitioned table
-- end_ignore
create table r (a int, b int) distributed by (a);
CREATE TABLE
create table m ();
psql:/path/sql_file:1: NOTICE: Table has no attributes to distribute on.
CREATE TABLE
alter table m add column a int;
ALTER TABLE
alter table m add column b int;
ALTER TABLE
CREATE TABLE purchase_par (id int, year int, month int, day int, region text)
DISTRIBUTED BY (id)
PARTITION BY LIST (region)
( PARTITION usa VALUES ('south'),
PARTITION europe VALUES ('north'),
PARTITION asia VALUES ('east'),
DEFAULT PARTITION other_regions)
;
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_usa" for table "purchase_par"
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_europe" for table "purchase_par"
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_asia" for table "purchase_par"
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_other_regions" for table "purchase_par"
CREATE TABLE
insert into r select generate_series(1, 10000), generate_series(1, 10000) * 3;
INSERT 0 10000
insert into m select generate_series(1, 1000), generate_series(1, 1000) * 4;
INSERT 0 1000
Insert into purchase_par values(1,2009,13,29,'east');
INSERT 0 1
Insert into purchase_par values(2,2009,13,29,'west');
INSERT 0 1
Insert into purchase_par values(3,2009,13,29,'north');
INSERT 0 1
Insert into purchase_par values(4,2009,13,29,'south');
INSERT 0 1
Insert into purchase_par values(5,2009,13,29,'east');
INSERT 0 1
Insert into purchase_par values(6,2009,13,29,'west');
INSERT 0 1
Insert into purchase_par values(7,2002,13,29,'north');
INSERT 0 1
Insert into purchase_par values(8,2003,13,29,'south');
INSERT 0 1
Insert into purchase_par values(9,2004,13,29,'east');
INSERT 0 1
Insert into purchase_par values(10,2005,13,29,'west');
INSERT 0 1
Insert into purchase_par values(11,2006,13,29,'north');
INSERT 0 1
Insert into purchase_par values(12,2007,01,29,'south');
INSERT 0 1
Insert into purchase_par values(13,2008,02,29,'east');
INSERT 0 1
Insert into purchase_par values(14,2002,03,29,'west');
INSERT 0 1
Insert into purchase_par values(15,2003,04,29,'north');
INSERT 0 1
Insert into purchase_par values(16,2004,05,29,'south');
INSERT 0 1
Insert into purchase_par values(17,2005,06,29,'east');
INSERT 0 1
Insert into purchase_par values(18,2006,07,29,'west');
INSERT 0 1
Insert into purchase_par values(19,2007,08,29,'north');
INSERT 0 1
Insert into purchase_par values(20,2008,09,29,'south');
INSERT 0 1
-- partitioned table: update --
select purchase_par.* from purchase_par where id in (select m.b from m, r where m.a = r.b) and day in (select a from r);
id | year | month | day | region
----+------+-------+-----+--------
12 | 2007 | 1 | 29 | south
(1 row)
update purchase_par set region = 'new_region' where id in (select m.b from m, r where m.a = r.b) and day in (select a from r);
UPDATE 1
select purchase_par.* from purchase_par where id in (select m.b from m, r where m.a = r.b) and day in (select a from r);
id | year | month | day | region
----+------+-------+-----+------------
12 | 2007 | 1 | 29 | new_region
(1 row)
select purchase_par.* from purchase_par,m,r where purchase_par.id = m.b and purchase_par.month = r.b;
id | year | month | day | region
----+------+-------+-----+--------
20 | 2008 | 9 | 29 | south
(1 row)
update purchase_par set month = month+1 from r,m where purchase_par.id = m.b and purchase_par.month = r.b;
UPDATE 1
select purchase_par.* from purchase_par,m,r where purchase_par.id = m.b and purchase_par.month = r.b+1;
id | year | month | day | region
----+------+-------+-----+--------
20 | 2008 | 10 | 29 | south
8 | 2003 | 13 | 29 | south
4 | 2009 | 13 | 29 | south
(3 rows)
-- heap table: delete --
select * from r where b in (select month-1 from purchase_par,m where purchase_par.id = m.b);
a | b
---+----
3 | 9
4 | 12
(2 rows)
delete from r where b in (select month-1 from purchase_par,m where purchase_par.id = m.b);
DELETE 2
select * from r where b in (select month-1 from purchase_par,m where purchase_par.id = m.b);
a | b
---+---
(0 rows)
-- master-only table: update
select m.* from m,r,purchase_par where m.a = r.b and m.b = purchase_par.id;
a | b
---+----
3 | 12
(1 row)
delete from m using r,purchase_par where m.a = r.b and m.b = purchase_par.id;
DELETE 1
select m.* from m,r,purchase_par where m.a = r.b and m.b = purchase_par.id;
a | b
---+---
(0 rows)
select m.* from m,r,purchase_par where m.a = r.a and m.b = purchase_par.id;
a | b
---+----
1 | 4
5 | 20
2 | 8
(3 rows)
update m set b = m.b + 1 from r,purchase_par where m.a = r.a and m.b = purchase_par.id;
UPDATE 3
select m.* from m,r,purchase_par where m.a = r.a and m.b = purchase_par.id + 1;
a | b
---+----
2 | 9
1 | 5
5 | 21
(3 rows)
--Drop tables
drop table r;
DROP TABLE
drop table m;
DROP TABLE
drop table purchase_par cascade;
DROP TABLE
-- start_ignore
-- end_ignore
--
-- @created 2009-01-27 14:00:00
-- @modified 2013-06-24 17:00:00
-- @tags ddl schema_topology
-- @description Joins
-- start_ignore
drop table if exists r;
psql:/path/sql_file:1: NOTICE: table "r" does not exist, skipping
DROP TABLE
drop table if exists m;
psql:/path/sql_file:1: NOTICE: table "m" does not exist, skipping
DROP TABLE
drop table if exists purchase_par cascade;
psql:/path/sql_file:1: NOTICE: table "purchase_par" does not exist, skipping
DROP TABLE
-- 3 tables: heap, master-only and partitioned table
-- end_ignore
create table r (a int, b int) distributed by (a);
CREATE TABLE
create table m ();
psql:/path/sql_file:1: NOTICE: Table has no attributes to distribute on.
CREATE TABLE
alter table m add column a int;
ALTER TABLE
alter table m add column b int;
ALTER TABLE
CREATE TABLE purchase_par (id int, year int, month int, day int, region text)
DISTRIBUTED BY (id)
PARTITION BY LIST (region)
( PARTITION usa VALUES ('south'),
PARTITION europe VALUES ('north'),
PARTITION asia VALUES ('east'),
DEFAULT PARTITION other_regions)
;
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_usa" for table "purchase_par"
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_europe" for table "purchase_par"
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_asia" for table "purchase_par"
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_other_regions" for table "purchase_par"
CREATE TABLE
insert into r select generate_series(1, 10000), generate_series(1, 10000) * 3;
INSERT 0 10000
insert into m select generate_series(1, 1000), generate_series(1, 1000) * 4;
INSERT 0 1000
Insert into purchase_par values(1,2009,13,29,'east');
INSERT 0 1
Insert into purchase_par values(2,2009,13,29,'west');
INSERT 0 1
Insert into purchase_par values(3,2009,13,29,'north');
INSERT 0 1
Insert into purchase_par values(4,2009,13,29,'south');
INSERT 0 1
Insert into purchase_par values(5,2009,13,29,'east');
INSERT 0 1
Insert into purchase_par values(6,2009,13,29,'west');
INSERT 0 1
Insert into purchase_par values(7,2002,13,29,'north');
INSERT 0 1
Insert into purchase_par values(8,2003,13,29,'south');
INSERT 0 1
Insert into purchase_par values(9,2004,13,29,'east');
INSERT 0 1
Insert into purchase_par values(10,2005,13,29,'west');
INSERT 0 1
Insert into purchase_par values(11,2006,13,29,'north');
INSERT 0 1
Insert into purchase_par values(12,2007,01,29,'south');
INSERT 0 1
Insert into purchase_par values(13,2008,02,29,'east');
INSERT 0 1
Insert into purchase_par values(14,2002,03,29,'west');
INSERT 0 1
Insert into purchase_par values(15,2003,04,29,'north');
INSERT 0 1
Insert into purchase_par values(16,2004,05,29,'south');
INSERT 0 1
Insert into purchase_par values(17,2005,06,29,'east');
INSERT 0 1
Insert into purchase_par values(18,2006,07,29,'west');
INSERT 0 1
Insert into purchase_par values(19,2007,08,29,'north');
INSERT 0 1
Insert into purchase_par values(20,2008,09,29,'south');
INSERT 0 1
-- partitioned table: update --
select purchase_par.* from purchase_par where id in (select m.b from m, r where m.a = r.b) and day in (select a from r);
id | year | month | day | region
----+------+-------+-----+--------
12 | 2007 | 1 | 29 | south
(1 row)
update purchase_par set region = 'new_region' where id in (select m.b from m, r where m.a = r.b) and day in (select a from r);
UPDATE 1
select purchase_par.* from purchase_par where id in (select m.b from m, r where m.a = r.b) and day in (select a from r);
id | year | month | day | region
----+------+-------+-----+------------
12 | 2007 | 1 | 29 | new_region
(1 row)
select purchase_par.* from purchase_par,m,r where purchase_par.id = m.b and purchase_par.month = r.b;
id | year | month | day | region
----+------+-------+-----+--------
20 | 2008 | 9 | 29 | south
(1 row)
update purchase_par set month = month+1 from r,m where purchase_par.id = m.b and purchase_par.month = r.b;
UPDATE 1
select purchase_par.* from purchase_par,m,r where purchase_par.id = m.b and purchase_par.month = r.b+1;
id | year | month | day | region
----+------+-------+-----+--------
20 | 2008 | 10 | 29 | south
8 | 2003 | 13 | 29 | south
4 | 2009 | 13 | 29 | south
(3 rows)
-- heap table: delete --
select * from r where b in (select month-1 from purchase_par,m where purchase_par.id = m.b);
a | b
---+----
3 | 9
4 | 12
(2 rows)
delete from r where b in (select month-1 from purchase_par,m where purchase_par.id = m.b);
DELETE 2
select * from r where b in (select month-1 from purchase_par,m where purchase_par.id = m.b);
a | b
---+---
(0 rows)
-- master-only table: update
select m.* from m,r,purchase_par where m.a = r.b and m.b = purchase_par.id;
a | b
---+----
3 | 12
(1 row)
delete from m using r,purchase_par where m.a = r.b and m.b = purchase_par.id;
DELETE 1
select m.* from m,r,purchase_par where m.a = r.b and m.b = purchase_par.id;
a | b
---+---
(0 rows)
select m.* from m,r,purchase_par where m.a = r.a and m.b = purchase_par.id;
a | b
---+----
2 | 8
1 | 4
5 | 20
(3 rows)
update m set b = m.b + 1 from r,purchase_par where m.a = r.a and m.b = purchase_par.id;
UPDATE 3
select m.* from m,r,purchase_par where m.a = r.a and m.b = purchase_par.id + 1;
a | b
---+----
2 | 9
1 | 5
5 | 21
(3 rows)
--Drop tables
drop table r;
DROP TABLE
drop table m;
DROP TABLE
drop table purchase_par cascade;
DROP TABLE
-- start_ignore
-- end_ignore
--
-- @created 2009-01-27 14:00:00
-- @modified 2013-06-24 17:00:00
-- @tags ddl schema_topology
-- @description Joins
-- start_ignore
drop table if exists r;
psql:/path/sql_file:1: NOTICE: table "r" does not exist, skipping
DROP TABLE
drop table if exists m;
psql:/path/sql_file:1: NOTICE: table "m" does not exist, skipping
DROP TABLE
drop table if exists purchase_par cascade;
psql:/path/sql_file:1: NOTICE: table "purchase_par" does not exist, skipping
DROP TABLE
-- 3 tables: heap, master-only and partitioned table
-- end_ignore
create table r (a int, b int) distributed by (a);
CREATE TABLE
create table m ();
psql:/path/sql_file:1: NOTICE: Table has no attributes to distribute on.
CREATE TABLE
alter table m add column a int;
ALTER TABLE
alter table m add column b int;
ALTER TABLE
CREATE TABLE purchase_par (id int, year int, month int, day int, region text)
DISTRIBUTED BY (id)
PARTITION BY LIST (region)
( PARTITION usa VALUES ('south'),
PARTITION europe VALUES ('north'),
PARTITION asia VALUES ('east'),
DEFAULT PARTITION other_regions)
;
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_usa" for table "purchase_par"
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_europe" for table "purchase_par"
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_asia" for table "purchase_par"
psql:/path/sql_file:1: NOTICE: CREATE TABLE will create partition "purchase_par_1_prt_other_regions" for table "purchase_par"
CREATE TABLE
insert into r select generate_series(1, 10000), generate_series(1, 10000) * 3;
INSERT 0 10000
insert into m select generate_series(1, 1000), generate_series(1, 1000) * 4;
INSERT 0 1000
Insert into purchase_par values(1,2009,13,29,'east');
INSERT 0 1
Insert into purchase_par values(2,2009,13,29,'west');
INSERT 0 1
Insert into purchase_par values(3,2009,13,29,'north');
INSERT 0 1
Insert into purchase_par values(4,2009,13,29,'south');
INSERT 0 1
Insert into purchase_par values(5,2009,13,29,'east');
INSERT 0 1
Insert into purchase_par values(6,2009,13,29,'west');
INSERT 0 1
Insert into purchase_par values(7,2002,13,29,'north');
INSERT 0 1
Insert into purchase_par values(8,2003,13,29,'south');
INSERT 0 1
Insert into purchase_par values(9,2004,13,29,'east');
INSERT 0 1
Insert into purchase_par values(10,2005,13,29,'west');
INSERT 0 1
Insert into purchase_par values(11,2006,13,29,'north');
INSERT 0 1
Insert into purchase_par values(12,2007,01,29,'south');
INSERT 0 1
Insert into purchase_par values(13,2008,02,29,'east');
INSERT 0 1
Insert into purchase_par values(14,2002,03,29,'west');
INSERT 0 1
Insert into purchase_par values(15,2003,04,29,'north');
INSERT 0 1
Insert into purchase_par values(16,2004,05,29,'south');
INSERT 0 1
Insert into purchase_par values(17,2005,06,29,'east');
INSERT 0 1
Insert into purchase_par values(18,2006,07,29,'west');
INSERT 0 1
Insert into purchase_par values(19,2007,08,29,'north');
INSERT 0 1
Insert into purchase_par values(20,2008,09,29,'south');
INSERT 0 1
-- partitioned table: update --
select purchase_par.* from purchase_par where id in (select m.b from m, r where m.a = r.b) and day in (select a from r);
id | year | month | day | region
----+------+-------+-----+--------
12 | 2007 | 1 | 29 | south
(1 row)
update purchase_par set region = 'new_region' where id in (select m.b from m, r where m.a = r.b) and day in (select a from r);
UPDATE 1
select purchase_par.* from purchase_par where id in (select m.b from m, r where m.a = r.b) and day in (select a from r);
id | year | month | day | region
----+------+-------+-----+------------
12 | 2007 | 1 | 29 | new_region
(1 row)
select purchase_par.* from purchase_par,m,r where purchase_par.id = m.b and purchase_par.month = r.b;
id | year | month | day | region
----+------+-------+-----+--------
20 | 2008 | 9 | 29 | south
(1 row)
update purchase_par set month = month+1 from r,m where purchase_par.id = m.b and purchase_par.month = r.b;
UPDATE 1
select purchase_par.* from purchase_par,m,r where purchase_par.id = m.b and purchase_par.month = r.b+1;
id | year | month | day | region
----+------+-------+-----+--------
8 | 2003 | 13 | 29 | south
4 | 2009 | 13 | 29 | south
20 | 2008 | 10 | 29 | south
(3 rows)
-- heap table: delete --
select * from r where b in (select month-1 from purchase_par,m where purchase_par.id = m.b);
a | b
---+----
4 | 12
3 | 9
(2 rows)
delete from r where b in (select month-1 from purchase_par,m where purchase_par.id = m.b);
DELETE 2
select * from r where b in (select month-1 from purchase_par,m where purchase_par.id = m.b);
a | b
---+---
(0 rows)
-- master-only table: update
select m.* from m,r,purchase_par where m.a = r.b and m.b = purchase_par.id;
a | b
---+----
3 | 12
(1 row)
delete from m using r,purchase_par where m.a = r.b and m.b = purchase_par.id;
DELETE 1
select m.* from m,r,purchase_par where m.a = r.b and m.b = purchase_par.id;
a | b
---+---
(0 rows)
select m.* from m,r,purchase_par where m.a = r.a and m.b = purchase_par.id;
a | b
---+----
5 | 20
2 | 8
1 | 4
(3 rows)
update m set b = m.b + 1 from r,purchase_par where m.a = r.a and m.b = purchase_par.id;
UPDATE 3
select m.* from m,r,purchase_par where m.a = r.a and m.b = purchase_par.id + 1;
a | b
---+----
1 | 5
5 | 21
2 | 9
(3 rows)
--Drop tables
drop table r;
DROP TABLE
drop table m;
DROP TABLE
drop table purchase_par cascade;
DROP TABLE
--
-- @created 2009-01-27 14:00:00
-- @modified 2013-06-24 17:00:00
-- @tags ddl schema_topology
-- @description Joins
-- start_ignore
drop table if exists r;
drop table if exists m;
drop table if exists purchase_par cascade;
-- 3 tables: heap, master-only and partitioned table
-- end_ignore
create table r (a int, b int) distributed by (a);
create table m ();
alter table m add column a int;
alter table m add column b int;
CREATE TABLE purchase_par (id int, year int, month int, day int, region text)
DISTRIBUTED BY (id)
PARTITION BY LIST (region)
( PARTITION usa VALUES ('south'),
PARTITION europe VALUES ('north'),
PARTITION asia VALUES ('east'),
DEFAULT PARTITION other_regions)
;
insert into r select generate_series(1, 10000), generate_series(1, 10000) * 3;
insert into m select generate_series(1, 1000), generate_series(1, 1000) * 4;
Insert into purchase_par values(1,2009,13,29,'east');
Insert into purchase_par values(2,2009,13,29,'west');
Insert into purchase_par values(3,2009,13,29,'north');
Insert into purchase_par values(4,2009,13,29,'south');
Insert into purchase_par values(5,2009,13,29,'east');
Insert into purchase_par values(6,2009,13,29,'west');
Insert into purchase_par values(7,2002,13,29,'north');
Insert into purchase_par values(8,2003,13,29,'south');
Insert into purchase_par values(9,2004,13,29,'east');
Insert into purchase_par values(10,2005,13,29,'west');
Insert into purchase_par values(11,2006,13,29,'north');
Insert into purchase_par values(12,2007,01,29,'south');
Insert into purchase_par values(13,2008,02,29,'east');
Insert into purchase_par values(14,2002,03,29,'west');
Insert into purchase_par values(15,2003,04,29,'north');
Insert into purchase_par values(16,2004,05,29,'south');
Insert into purchase_par values(17,2005,06,29,'east');
Insert into purchase_par values(18,2006,07,29,'west');
Insert into purchase_par values(19,2007,08,29,'north');
Insert into purchase_par values(20,2008,09,29,'south');
-- partitioned table: update --
select purchase_par.* from purchase_par where id in (select m.b from m, r where m.a = r.b) and day in (select a from r);
update purchase_par set region = 'new_region' where id in (select m.b from m, r where m.a = r.b) and day in (select a from r);
select purchase_par.* from purchase_par where id in (select m.b from m, r where m.a = r.b) and day in (select a from r);
select purchase_par.* from purchase_par,m,r where purchase_par.id = m.b and purchase_par.month = r.b;
update purchase_par set month = month+1 from r,m where purchase_par.id = m.b and purchase_par.month = r.b;
select purchase_par.* from purchase_par,m,r where purchase_par.id = m.b and purchase_par.month = r.b+1;
-- heap table: delete --
select * from r where b in (select month-1 from purchase_par,m where purchase_par.id = m.b);
delete from r where b in (select month-1 from purchase_par,m where purchase_par.id = m.b);
select * from r where b in (select month-1 from purchase_par,m where purchase_par.id = m.b);
-- master-only table: update
select m.* from m,r,purchase_par where m.a = r.b and m.b = purchase_par.id;
delete from m using r,purchase_par where m.a = r.b and m.b = purchase_par.id;
select m.* from m,r,purchase_par where m.a = r.b and m.b = purchase_par.id;
select m.* from m,r,purchase_par where m.a = r.a and m.b = purchase_par.id;
update m set b = m.b + 1 from r,purchase_par where m.a = r.a and m.b = purchase_par.id;
select m.* from m,r,purchase_par where m.a = r.a and m.b = purchase_par.id + 1;
--Drop tables
drop table r;
drop table m;
drop table purchase_par cascade;
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import tinctest
import os
import inspect
from mpp.models import SQLTestCase
from tinctest.lib import local_path, Gpdiff
from mpp.lib.PSQL import PSQL
class DMLOverJoinsTest(SQLTestCase):
"""
@description This contains several test cases for possible ways of manipulating objects. This test case specifically deals with Joins. For more details, refer to QA task QA-143
@created 2009-01-27 14:00:00
@modified 2013-10-17 17:10:15
@tags ddl schema_topology
"""
sql_dir = 'sqls/ddls/joins'
ans_dir = 'sqls/ddls/joins'
out_dir = 'sqls/ddls/joins'
@classmethod
def setUpClass(cls):
super(DMLOverJoinsTest, cls).setUpClass()
tinctest.logger.info("*** Running the pre-requisite sql files drop.sql and setup.sql")
PSQL.run_sql_file(local_path('sqls/setup/drop.sql'))
PSQL.run_sql_file(local_path('sqls/setup/create.sql'))
tinctest.logger.info("Starting the join test.. ")
def count_segs(self):
""" Counts the no. of segments from the cluster configuration """
cmd_str = "SELECT COUNT(*) FROM gp_segment_configuration WHERE content <> -1 and preferred_role = 'p'"
out = PSQL.run_sql_command(cmd_str).split("\n")[3].strip()
return int(out)
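# Illustrative sketch only (not from the original test): how count_segs() above parses the
# command output, assuming PSQL.run_sql_command returns a leading blank line followed by
# psql's default table formatting (header, separator, value row, row-count footer).
sample_output = "\n count \n-------\n     3\n(1 row)\n"
lines = sample_output.split("\n")      # ['', ' count ', '-------', '     3', '(1 row)', '']
assert int(lines[3].strip()) == 3      # index 3 is the value row under that assumption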
def local_path(self, filename):
"""
Return the absolute path of the input file. Overridden here to use an absolute path instead of a relative one."""
frame = inspect.stack()[1]
source_file = inspect.getsourcefile(frame[0])
source_dir = os.path.dirname(os.path.abspath(source_file))
return os.path.join(source_dir, filename)
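# Note on the override above (sketch, hypothetical paths): inspect.stack()[1] is the caller's
# frame, so relative names resolve against the caller's source directory rather than the
# current working directory. For example, a call made from /home/gpadmin/tests/test_joins.py:
#   self.local_path('sqls/setup/drop.sql') -> '/home/gpadmin/tests/sqls/setup/drop.sql'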
@classmethod
def get_out_dir(cls):
# If the sqls are located in a different directory than the source file, create an output
# directory at the same level as the sql dir
if cls.get_source_dir() == cls.get_sql_dir():
out_dir = os.path.join(cls.get_sql_dir(), 'output/DMLOverJoinsTest/')
else:
out_dir = os.path.join(cls.get_sql_dir(), '../output/DMLOverJoinsTest/')
if not os.path.exists(out_dir):
os.makedirs(out_dir)
return out_dir
def verify_out_file(self, out_file, ans_file):
if ans_file is not None:
# ramans2: Modified test case to pick different answer files depending on the # segments in the cluster
ans_file = self.local_path(ans_file+".%s" %self.count_segs())
self.test_artifacts.append(ans_file)
# Check if an init file exists in the same location as the sql file
init_files = []
init_file_path = os.path.join(self.get_sql_dir(), 'init_file')
if os.path.exists(init_file_path):
init_files.append(init_file_path)
result = Gpdiff.are_files_equal(out_file, ans_file, match_sub = init_files)
if result == False:
self.test_artifacts.append(out_file.replace('.out', '.diff'))
return result
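# Minimal sketch (assumed file naming, not part of the original test) of the per-segment
# answer-file convention used by verify_out_file above: the base answer file gets a
# ".N" suffix, where N is the primary segment count reported by count_segs().
def pick_ans_file(base_ans_file, num_segments):
    """E.g. pick_ans_file('sqls/ddls/joins/joins.ans', 3) -> 'sqls/ddls/joins/joins.ans.3'."""
    return "%s.%s" % (base_ans_file, num_segments)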
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import tinctest
import os
import inspect
from mpp.models import SQLTestCase
from tinctest.lib import local_path, Gpdiff
from mpp.lib.PSQL import PSQL
class DMLOverJoinsTest(SQLTestCase):
    """
    @description This contains several test cases for possible ways of manipulating objects. This test case specifically deals with joins. For more details, refer to QA task QA-143.
    @created 2009-01-27 14:00:00
    @modified 2013-10-17 17:10:15
    @tags ddl schema_topology
    """
    sql_dir = 'sqls/ddls/joins'
    ans_dir = 'sqls/ddls/joins'
    out_dir = 'sqls/ddls/joins'

    @classmethod
    def setUpClass(cls):
        super(DMLOverJoinsTest, cls).setUpClass()
        tinctest.logger.info("*** Running the pre-requisite sql files drop.sql and create.sql")
        PSQL.run_sql_file(local_path('sqls/setup/drop.sql'))
        PSQL.run_sql_file(local_path('sqls/setup/create.sql'))
        tinctest.logger.info("Starting the join test ...")

    def count_segs(self):
        """ Counts the number of primary segments from the cluster configuration """
        cmd_str = "SELECT COUNT(*) FROM gp_segment_configuration WHERE content <> -1 and preferred_role = 'p'"
        # The count value is on the fourth line of the psql output
        out = PSQL.run_sql_command(cmd_str).split("\n")[3].strip()
        return int(out)

    def local_path(self, filename):
        """
        Return the absolute path of the input file. Overridden here to return an absolute path,
        resolved against the caller's source directory, instead of a relative path.
        """
        frame = inspect.stack()[1]
        source_file = inspect.getsourcefile(frame[0])
        source_dir = os.path.dirname(os.path.abspath(source_file))
        return os.path.join(source_dir, filename)

    @classmethod
    def get_out_dir(cls):
        # If the sqls are located in a different directory than the source file, create an output
        # directory at the same level as the sql dir
        if cls.get_source_dir() == cls.get_sql_dir():
            out_dir = os.path.join(cls.get_sql_dir(), 'output/DMLOverJoinsTest/')
        else:
            out_dir = os.path.join(cls.get_sql_dir(), '../output/DMLOverJoinsTest/')
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        return out_dir

    def _which_ans_file(self, sql_file, optimizer):
        '''
        Selects the right answer file depending on whether optimizer_mode is on or not:
        if optimizer is True, answer file = .ans.orca and, if not present, .ans
        if optimizer is False, answer file = .ans.planner and, if not present, .ans
        if optimizer is None, answer file = .ans
        '''
        base_sql_file = os.path.basename(sql_file)
        ans_file = None
        if optimizer == True:
            ans_file = os.path.join(self.get_ans_dir(), base_sql_file.replace('.sql', '.ans.orca'))
        # ramans2: Modified test case to pick different answer files depending on the number of segments in the cluster
        elif optimizer == False:
            ans_file = os.path.join(self.get_ans_dir(), base_sql_file.replace('.sql', '.ans.planner'))
        else:
            if self.__class__._global_optimizer_mode == 'on':
                ans_file = os.path.join(self.get_ans_dir(), base_sql_file.replace('.sql', '.ans.orca'))
            else:
                ans_file = os.path.join(self.get_ans_dir(), base_sql_file.replace('.sql', '.ans.planner'))
        if not ans_file:
            ans_file = os.path.join(self.get_ans_dir(), base_sql_file.replace('.sql', '.ans'))
        else:
            if not os.path.exists(ans_file):
                ans_file = os.path.join(self.get_ans_dir(), base_sql_file.replace('.sql', '.ans'))
        ans_file = ans_file + ".%s" % self.count_segs()
        return ans_file
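For readers skimming the diff, the answer-file lookup performed by _which_ans_file and count_segs above can be summarized with a small sketch. The helper below is not part of the removed test suite; the function name pick_ans_file, its parameters, and the example paths are hypothetical and serve only to illustrate the naming convention (base name, then .ans.orca / .ans.planner / .ans, then the primary-segment count):

import os

def pick_ans_file(ans_dir, sql_file, optimizer_on, num_segs):
    # Hypothetical illustration only: prefer the optimizer-specific answer file,
    # fall back to the plain .ans file, then append the primary-segment count.
    base = os.path.basename(sql_file)
    suffix = '.ans.orca' if optimizer_on else '.ans.planner'
    ans_file = os.path.join(ans_dir, base.replace('.sql', suffix))
    if not os.path.exists(ans_file):
        ans_file = os.path.join(ans_dir, base.replace('.sql', '.ans'))
    return "%s.%s" % (ans_file, num_segs)

# Example: pick_ans_file('sqls/ddls/joins', 'joins.sql', True, 3) returns
# 'sqls/ddls/joins/joins.ans.orca.3' when joins.ans.orca exists,
# otherwise 'sqls/ddls/joins/joins.ans.3'.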
......@@ -34,24 +34,23 @@ class CrashRecoverySchemaTopologyTestCase(ScenarioTestCase):
db.setupDatabase('gptest')
# Run setup class which creates database components, filespaces, etc.
test_ST_GPFilespaceTablespaceTest.GPFilespaceTablespaceTest.setUpClass()
def execute_individual_tests(self):
'''
This test runs the five schema topology tests from classlist. Each of these five tests has a number of sql files.
tloader.loadTestsFromName creates test methods for individual sql files.
This test runs four schema topology tests from classlist. Each of these four tests has a number of sql files.
tloader.loadTestsFromName creates test methods for individual sql files.
For each sql file in each test case we first run the suspend checkpoint fault injector, next we run the test method generated earlier,
and finally we run checks.
'''
classlist = []
classlist.append('mpp.gpdb.tests.catalog.schema_topology.test_ST_DMLOverJoinsTest.DMLOverJoinsTest')
classlist.append('mpp.gpdb.tests.catalog.schema_topology.test_ST_EnhancedTableFunctionTest.EnhancedTableFunctionTest')
classlist.append('mpp.gpdb.tests.catalog.schema_topology.test_ST_OSSpecificSQLsTest.OSSpecificSQLsTest')
classlist.append('mpp.gpdb.tests.catalog.schema_topology.test_ST_AllSQLsTest.AllSQLsTest')
classlist.append('mpp.gpdb.tests.catalog.schema_topology.test_ST_GPFilespaceTablespaceTest.GPFilespaceTablespaceTest')
for classname in classlist:
tinctest.logger.info("\n\nrunning the test")
tloader = TINCTestLoader()
tests = tloader.loadTestsFromName(name=classname)
......@@ -60,16 +59,16 @@ class CrashRecoverySchemaTopologyTestCase(ScenarioTestCase):
test_list_2 = []
test_list_2.append('mpp.gpdb.tests.storage.crashrecovery.SuspendCheckpointCrashRecovery.run_fault_injector_to_skip_checkpoint')
self.test_case_scenario.append(test_list_2)
for test in tests:
testname = '%s.%s.%s' %(test.__class__.__module__, test.__class__.__name__, test._testMethodName)
tinctest.logger.info(testname)
test_list_3 = []
test_list_3.append(testname)
self.test_case_scenario.append(test_list_3)
tinctest.logger.info(testname)
test_list_4 = []
test_list_4.append('mpp.gpdb.tests.storage.crashrecovery.SuspendCheckpointCrashRecovery.do_post_run_checks')
self.test_case_scenario.append(test_list_4)
......
......@@ -496,19 +496,16 @@ class GPExpandTestCase(MPPTestCase, ScenarioTestCase):
def construct_expansion_scenario(self):
classlist = []
classlist.append('mpp.gpdb.tests.catalog.schema_topology.test_ST_DMLOverJoinsTest.DMLOverJoinsTest')
classlist.append('mpp.gpdb.tests.catalog.schema_topology.test_ST_EnhancedTableFunctionTest.EnhancedTableFunctionTest')
classlist.append('mpp.gpdb.tests.catalog.schema_topology.test_ST_OSSpecificSQLsTest.OSSpecificSQLsTest')
classlist.append('mpp.gpdb.tests.catalog.schema_topology.test_ST_AllSQLsTest.AllSQLsTest')
classlist.append('mpp.gpdb.tests.catalog.schema_topology.test_ST_GPFilespaceTablespaceTest.GPFilespaceTablespaceTest')
# removed check for helpfile
if self.run_workload:
# Run expansion workload
if self.use_parallel_expansion:
self.test_case_scenario.append(['%s.scenarios.workloads.test_run_workload.PreExpansionWorkloadTests.test_create_parallel_expansion_workload' %self.package_name])
elif self.duration_enabled or self.use_end_time:
elif self.duration_enabled or self.use_end_time:
self.test_case_scenario.append(['%s.scenarios.workloads.test_run_workload.PreExpansionWorkloadTests.test_create_duration_workload' %self.package_name])
elif self.ranks_enabled:
self.test_case_scenario.append(['%s.scenarios.workloads.test_run_workload.PreExpansionWorkloadTests.test_create_base_workload' %self.package_name])
......
......@@ -156,12 +156,9 @@ class GprecoversegTest(ScenarioTestCase):
test_case_list3.append('mpp.gpdb.tests.catalog.schema_topology.test_ST_GPFilespaceTablespaceTest.GPFilespaceTablespaceTest')
self.test_case_scenario.append(test_case_list3)
test_case_list4 = []
test_case_list4.append('mpp.gpdb.tests.catalog.schema_topology.test_ST_DMLOverJoinsTest.DMLOverJoinsTest')
test_case_list4.append('mpp.gpdb.tests.catalog.schema_topology.test_ST_EnhancedTableFunctionTest.EnhancedTableFunctionTest')
self.test_case_scenario.append(test_case_list4)
test_case_list5 = []
test_case_list5.append('mpp.gpdb.tests.catalog.schema_topology.test_ST_EnhancedTableFunctionTest.EnhancedTableFunctionTest')
self.test_case_scenario.append(test_case_list5)
def test_gprecoverseg_config_validation(self):
"""
......