未验证 提交 54551db4 编写于 作者: Z Zhenghua Lyu 提交者: GitHub

Fix lockmode issues

Lockmode of Update|Delete or Select-for-update Statement
is controlled by: whether the table is AO or heap and the
GUC gp_enable_global_deadlock_detector.

The logic for lockmode is:
  1. Select-for-update always hold ExclusiveLock
  2. UPDATE|DELETE on AO tables always hold ExclusiveLock
  3. UPDATE|DELETE on heap tables hold ExclusiveLock when
     gp_enable_global_deadlock_detector is off, otherwise
     hold RowExclusiveLock

We hold locks in parser stage and Initplan before executing,
the lockmode should be the same at the two stages.

This commit fixes lockmode issues to make things correct.
Co-authored-by: Shujie Zhang <shzhang@pivotal.io>
上级 26695ad3
......@@ -1204,19 +1204,13 @@ CdbTryOpenRelation(Oid relid, LOCKMODE reqmode, bool noWait, bool *lockUpgraded)
*/
if (lockmode == RowExclusiveLock)
{
rel = try_heap_open(relid, NoLock, noWait);
if (!rel)
return NULL;
if (Gp_role == GP_ROLE_DISPATCH &&
(!gp_enable_global_deadlock_detector ||
RelationIsAppendOptimized(rel)))
CondUpgradeRelLock(relid, noWait))
{
lockmode = ExclusiveLock;
if (lockUpgraded != NULL)
*lockUpgraded = true;
}
relation_close(rel, NoLock);
}
rel = try_heap_open(relid, lockmode, noWait);
......
......@@ -1065,26 +1065,15 @@ addRangeTableEntry(ParseState *pstate,
* from postgres to allow for required lock promotion for distributed
* AO tables.
* select for update should lock the whole table, we do it here.
* See discussion on https://groups.google.com/a/greenplum.org/d/msg/gpdb-dev/p-6_dNjnRMQ/OzTnb586AwAJ
* And we do not have to treat system tables differently because direct DMLs
* on system tables are rare.
*/
locking = getLockedRefname(pstate, refname);
if (locking)
{
if (locking->strength >= LCS_FORNOKEYUPDATE)
{
Oid relid;
relid = RangeVarGetRelid(relation, lockmode, false);
rel = try_heap_open(relid, NoLock, true);
if (!rel)
elog(ERROR, "open relation(%u) fail", relid);
lockmode = IsSystemRelation(rel) ? RowExclusiveLock : ExclusiveLock;
heap_close(rel, NoLock);
}
else
{
lockmode = RowShareLock;
}
lockmode = locking->strength >= LCS_FORNOKEYUPDATE ?
ExclusiveLock : RowShareLock;
nowait = locking->noWait;
}
......
......@@ -125,8 +125,9 @@ AcquireRewriteLocks(Query *parsetree,
bool forExecute,
bool forUpdatePushedDown)
{
ListCell *l;
int rt_index;
ListCell *l;
int rt_index;
RowMarkClause *rc;
acquireLocksOnSubLinks_context context;
context.for_execute = forExecute;
......@@ -162,30 +163,34 @@ AcquireRewriteLocks(Query *parsetree,
* CDB: The proper lock mode depends on whether the relation is
* local or distributed, which is discovered by heap_open().
* To handle this we make use of CdbOpenRelation().
*
* For update should hold ExclusiveLock, see the discussion on
* https://groups.google.com/a/greenplum.org/d/msg/gpdb-dev/p-6_dNjnRMQ/OzTnb586AwAJ
*
* Update|DELETE may have to upgrade the locks to avoid global
* deadlock and CdbOpenRelation will do more check for AO table
* and GDD's status.
*/
needLockUpgrade = false;
if (!forExecute)
lockmode = AccessShareLock;
else if (rt_index == parsetree->resultRelation)
{
lockmode = RowExclusiveLock;
needLockUpgrade = (parsetree->commandType == CMD_UPDATE ||
parsetree->commandType == CMD_DELETE);
}
else if (forUpdatePushedDown ||
get_parse_rowmark(parsetree, rt_index) != NULL)
lockmode = RowShareLock;
else
lockmode = AccessShareLock;
/* Target of INSERT/UPDATE/DELETE? */
if (rt_index == parsetree->resultRelation)
{
lockmode = RowExclusiveLock;
if (parsetree->commandType != CMD_INSERT)
needLockUpgrade = true;
}
/* FOR UPDATE/SHARE? */
else if (get_parse_rowmark(parsetree, rt_index) != NULL)
rc = get_parse_rowmark(parsetree, rt_index);
if (rc != NULL)
{
needLockUpgrade = true;
lockmode = rc->strength >= LCS_FORNOKEYUPDATE ?
ExclusiveLock : RowShareLock;
}
/* Take a lock either using CDB lock promotion or not */
......
......@@ -30,6 +30,7 @@
#include "catalog/namespace.h"
#include "cdb/cdbvars.h"
#include "utils/lsyscache.h" /* CDB: get_rel_namespace() */
#include "utils/guc.h"
/*
......@@ -1075,19 +1076,28 @@ LockTagIsTemp(const LOCKTAG *tag)
}
/*
* Because of the current design of AO table's visibility map,
* If gp_enable_global_deadlock_detector is set off, we always
* have to upgrade lock level to avoid global deadlock, and then
* because of the current design of AO table's visibility map,
* we have to keep upgrading locks for AO table.
*/
bool
CondUpgradeRelLock(Oid relid)
CondUpgradeRelLock(Oid relid, bool noWait)
{
Relation rel;
bool upgrade = false;
rel = try_relation_open(relid, NoLock, true);
if (!gp_enable_global_deadlock_detector)
return true;
/*
* try_relation_open will throw error if
* the relation is invalid
*/
rel = try_relation_open(relid, NoLock, noWait);
if (!rel)
elog(ERROR, "Relation open failed!");
return false;
else if (RelationIsAppendOptimized(rel))
upgrade = true;
else
......
......@@ -1510,25 +1510,31 @@ AcquireExecutorLocks(List *stmt_list, bool acquire)
* RowExclusiveLock is acquired in PostgreSQL here. Greenplum
* acquires ExclusiveLock to avoid distributed deadlock due to
* concurrent UPDATE/DELETE on the same table. This is in
* parity with CdbTryOpenRelation(). Catalog tables are
* replicated across cluster and don't suffer from the
* deadlock.
* Since we have introduced Global Deadlock Detector, only for ao
* table should we upgrade the lock.
* parity with CdbTryOpenRelation(). If it is heap table and
* the GDD is enabled, we could acquire RowExclusiveLock here.
*/
if (rte->relid >= FirstNormalObjectId &&
(plannedstmt->commandType == CMD_UPDATE ||
if ((plannedstmt->commandType == CMD_UPDATE ||
plannedstmt->commandType == CMD_DELETE) &&
CondUpgradeRelLock(rte->relid))
CondUpgradeRelLock(rte->relid, false))
lockmode = ExclusiveLock;
else
lockmode = RowExclusiveLock;
}
else if ((rc = get_plan_rowmark(plannedstmt->rowMarks, rt_index)) != NULL &&
RowMarkRequiresRowShareLock(rc->markType))
lockmode = RowShareLock;
else
lockmode = AccessShareLock;
{
/* GPDB specific behavior
* Select for update should acquire ExclusiveLock, see
* discussion on https://groups.google.com/a/greenplum.org/d/msg/gpdb-dev/p-6_dNjnRMQ/OzTnb586AwAJ
*/
rc = get_plan_rowmark(plannedstmt->rowMarks, rt_index);
if (rc != NULL)
{
lockmode = RowMarkRequiresRowShareLock(rc->markType) ?
RowShareLock : ExclusiveLock;
}
else
lockmode = AccessShareLock;
}
if (acquire)
LockRelationOid(rte->relid, lockmode);
......@@ -1599,25 +1605,38 @@ ScanQueryForLocks(Query *parsetree, bool acquire)
if (rt_index == parsetree->resultRelation)
{
/*
* RowExclusiveLock is acquired in PostgreSQL here.
* Greenplum acquires ExclusiveLock to avoid distributed
* deadlock due to concurrent UPDATE/DELETE on the same
* table. This is in parity with CdbTryOpenRelation().
* Catalog tables are replicated across cluster and don't
* suffer from the deadlock.
* RowExclusiveLock is acquired in PostgreSQL here. Greenplum
* acquires ExclusiveLock to avoid distributed deadlock due to
* concurrent UPDATE/DELETE on the same table. This is in
* parity with CdbTryOpenRelation(). If it is heap table and
* the GDD is enabled, we could acquire RowExclusiveLock here.
*/
if (rte->relid >= FirstNormalObjectId &&
(parsetree->commandType == CMD_UPDATE ||
if ((parsetree->commandType == CMD_UPDATE ||
parsetree->commandType == CMD_DELETE) &&
CondUpgradeRelLock(rte->relid))
CondUpgradeRelLock(rte->relid, false))
lockmode = ExclusiveLock;
else
lockmode = RowExclusiveLock;
}
else if (get_parse_rowmark(parsetree, rt_index) != NULL)
lockmode = RowShareLock;
else
lockmode = AccessShareLock;
{
/*
* GPDB specific behaviour:
* Select for update should acquire ExclusiveLock, see
* discussion on https://groups.google.com/a/greenplum.org/d/msg/gpdb-dev/p-6_dNjnRMQ/OzTnb586AwAJ
*/
RowMarkClause *rc;
rc = get_parse_rowmark(parsetree, rt_index);
if (rc != NULL)
{
lockmode = rc->strength >= LCS_FORNOKEYUPDATE ?
ExclusiveLock : RowShareLock;
}
else
lockmode = AccessShareLock;
}
if (acquire)
LockRelationOid(rte->relid, lockmode);
else
......
......@@ -107,6 +107,6 @@ extern void DescribeLockTag(StringInfo buf, const LOCKTAG *tag);
/* Knowledge about which locktags describe temp objects */
extern bool LockTagIsTemp(const LOCKTAG *tag);
extern bool CondUpgradeRelLock(Oid relid);
extern bool CondUpgradeRelLock(Oid relid, bool noWait);
#endif /* LMGR_H */
1: set optimizer = off;
SET
create or replace view show_locks_lockmodes as select locktype, mode, granted, relation::regclass from pg_locks where gp_segment_id = -1 and locktype = 'relation' and relation::regclass::text like 't_lockmods%';
CREATE
show gp_enable_global_deadlock_detector;
gp_enable_global_deadlock_detector
------------------------------------
off
(1 row)
-- 1. The first part of test is with
-- gp_enable_global_deadlock_detector off
-- 1.1 test for heap tables
create table t_lockmods (c int) distributed randomly;
CREATE
insert into t_lockmods select * from generate_series(1, 5);
INSERT 5
-- 1.1.1 select for update should hold ExclusiveLock on range tables
1: begin;
BEGIN
1: select * from t_lockmods for update;
c
---
4
5
1
2
3
(5 rows)
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+---------------+---------+------------
relation | ExclusiveLock | t | t_lockmods
(1 row)
1: abort;
ABORT
-- 1.1.2 update | delete should hold ExclusiveLock on result relations
1: begin;
BEGIN
1: update t_lockmods set c = c + 0;
UPDATE 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+---------------+---------+------------
relation | ExclusiveLock | t | t_lockmods
(1 row)
1: abort;
ABORT
1: begin;
BEGIN
1: delete from t_lockmods;
DELETE 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+---------------+---------+------------
relation | ExclusiveLock | t | t_lockmods
(1 row)
1: abort;
ABORT
-- 1.1.3 insert should hold RowExclusiveLock on result relations
1: begin;
BEGIN
1: insert into t_lockmods select * from generate_series(1, 5);
INSERT 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+------------------+---------+------------
relation | RowExclusiveLock | t | t_lockmods
(1 row)
1: abort;
ABORT
-- 1.1.4 use cached plan should be consistent with no cached plan
1: prepare select_for_update as select * from t_lockmods for update;
PREPARE
1: prepare update_tlockmods as update t_lockmods set c = c + 0;
PREPARE
1: prepare delete_tlockmods as delete from t_lockmods;
PREPARE
1: prepare insert_tlockmods as insert into t_lockmods select * from generate_series(1, 5);
PREPARE
1: begin;
BEGIN
1: execute select_for_update;
c
---
4
5
1
2
3
(5 rows)
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+---------------+---------+------------
relation | ExclusiveLock | t | t_lockmods
(1 row)
1: abort;
ABORT
1: begin;
BEGIN
1: execute update_tlockmods;
EXECUTE 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+---------------+---------+------------
relation | ExclusiveLock | t | t_lockmods
(1 row)
1: abort;
ABORT
1: begin;
BEGIN
1: execute delete_tlockmods;
EXECUTE 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+---------------+---------+------------
relation | ExclusiveLock | t | t_lockmods
(1 row)
1: abort;
ABORT
1: begin;
BEGIN
1: execute insert_tlockmods;
EXECUTE 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+------------------+---------+------------
relation | RowExclusiveLock | t | t_lockmods
(1 row)
1: abort;
ABORT
-- 1.2 test for AO table
create table t_lockmods_ao (c int) with (appendonly=true) distributed randomly;
CREATE
insert into t_lockmods_ao select * from generate_series(1, 8);
INSERT 8
-- 1.2.1 select for update should hold ExclusiveLock on range tables
1: begin;
BEGIN
1: select * from t_lockmods_ao for update;
c
---
2
4
1
3
5
8
6
7
(8 rows)
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+---------------+---------+---------------
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
ABORT
-- 1.2.2 update | delete should hold ExclusiveLock on result relations
1: begin;
BEGIN
1: update t_lockmods_ao set c = c + 0;
UPDATE 8
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+---------------+---------+---------------
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
ABORT
1: begin;
BEGIN
1: delete from t_lockmods_ao;
DELETE 8
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+---------------+---------+---------------
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
ABORT
-- 1.2.3 insert should hold RowExclusiveLock on result relations
1: begin;
BEGIN
1: insert into t_lockmods_ao select * from generate_series(1, 5);
INSERT 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+------------------+---------+---------------
relation | RowExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
ABORT
-- 1.2.4 use cached plan should be consistent with no cached plan
1: prepare select_for_update_ao as select * from t_lockmods_ao for update;
PREPARE
1: prepare update_tlockmods_ao as update t_lockmods_ao set c = c + 0;
PREPARE
1: prepare delete_tlockmods_ao as delete from t_lockmods_ao;
PREPARE
1: prepare insert_tlockmods_ao as insert into t_lockmods_ao select * from generate_series(1, 5);
PREPARE
1: begin;
BEGIN
1: execute select_for_update_ao;
c
---
6
7
2
4
1
3
5
8
(8 rows)
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+---------------+---------+---------------
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
ABORT
1: begin;
BEGIN
1: execute update_tlockmods_ao;
EXECUTE 8
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+---------------+---------+---------------
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
ABORT
1: begin;
BEGIN
1: execute delete_tlockmods_ao;
EXECUTE 8
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+---------------+---------+---------------
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
ABORT
1: begin;
BEGIN
1: execute insert_tlockmods_ao;
EXECUTE 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+------------------+---------+---------------
relation | RowExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
ABORT
1q: ... <quitting>
2q: ... <quitting>
-- start_ignore
! gpconfig -c gp_enable_global_deadlock_detector -v on;
20190401:16:37:37:002792 gpconfig:zlv:gpadmin-[INFO]:-completed successfully with parameters '-c gp_enable_global_deadlock_detector -v on'
! gpstop -rai;
20190401:16:37:37:002915 gpstop:zlv:gpadmin-[INFO]:-Starting gpstop with args: -rai
20190401:16:37:37:002915 gpstop:zlv:gpadmin-[INFO]:-Gathering information and validating the environment...
20190401:16:37:37:002915 gpstop:zlv:gpadmin-[INFO]:-Obtaining Greenplum Master catalog information
20190401:16:37:37:002915 gpstop:zlv:gpadmin-[INFO]:-Obtaining Segment details from master...
20190401:16:37:37:002915 gpstop:zlv:gpadmin-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 5.0.0-beta.3+dev.17653.g86b0b90 build dev'
20190401:16:37:37:002915 gpstop:zlv:gpadmin-[INFO]:-There are 1 connections to the database
20190401:16:37:37:002915 gpstop:zlv:gpadmin-[INFO]:-Commencing Master instance shutdown with mode='immediate'
20190401:16:37:37:002915 gpstop:zlv:gpadmin-[INFO]:-Master host=zlv
20190401:16:37:37:002915 gpstop:zlv:gpadmin-[INFO]:-Commencing Master instance shutdown with mode=immediate
20190401:16:37:37:002915 gpstop:zlv:gpadmin-[INFO]:-Master segment instance directory=/home/gpadmin/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1
20190401:16:37:37:002915 gpstop:zlv:gpadmin-[INFO]:-Attempting forceful termination of any leftover master process
20190401:16:37:37:002915 gpstop:zlv:gpadmin-[INFO]:-Terminating processes for segment /home/gpadmin/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1
20190401:16:37:37:002915 gpstop:zlv:gpadmin-[INFO]:-No standby master host configured
20190401:16:37:37:002915 gpstop:zlv:gpadmin-[INFO]:-Targeting dbid [2, 3, 4] for shutdown
20190401:16:37:37:002915 gpstop:zlv:gpadmin-[INFO]:-Commencing parallel segment instance shutdown, please wait...
20190401:16:37:37:002915 gpstop:zlv:gpadmin-[INFO]:-0.00% of jobs completed
20190401:16:37:38:002915 gpstop:zlv:gpadmin-[INFO]:-100.00% of jobs completed
20190401:16:37:38:002915 gpstop:zlv:gpadmin-[INFO]:-----------------------------------------------------
20190401:16:37:38:002915 gpstop:zlv:gpadmin-[INFO]:- Segments stopped successfully = 3
20190401:16:37:38:002915 gpstop:zlv:gpadmin-[INFO]:- Segments with errors during stop = 0
20190401:16:37:38:002915 gpstop:zlv:gpadmin-[INFO]:-----------------------------------------------------
20190401:16:37:38:002915 gpstop:zlv:gpadmin-[INFO]:-Successfully shutdown 3 of 3 segment instances
20190401:16:37:38:002915 gpstop:zlv:gpadmin-[INFO]:-Database successfully shutdown with no errors reported
20190401:16:37:38:002915 gpstop:zlv:gpadmin-[INFO]:-Cleaning up leftover gpmmon process
20190401:16:37:38:002915 gpstop:zlv:gpadmin-[INFO]:-No leftover gpmmon process found
20190401:16:37:38:002915 gpstop:zlv:gpadmin-[INFO]:-Cleaning up leftover gpsmon processes
20190401:16:37:38:002915 gpstop:zlv:gpadmin-[INFO]:-No leftover gpsmon processes on some hosts. not attempting forceful termination on these hosts
20190401:16:37:38:002915 gpstop:zlv:gpadmin-[INFO]:-Cleaning up leftover shared memory
20190401:16:37:39:002915 gpstop:zlv:gpadmin-[INFO]:-Restarting System...
-- end_ignore
1: set optimizer = off;
SET
2: show gp_enable_global_deadlock_detector;
gp_enable_global_deadlock_detector
------------------------------------
on
(1 row)
-- 2. The second part of test is with
-- gp_enable_global_deadlock_detector on
-- 2.1 test for heap tables
-- 2.1.1 select for update should hold ExclusiveLock on range tables
1: begin;
BEGIN
1: select * from t_lockmods for update;
c
---
1
2
3
4
5
(5 rows)
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+---------------+---------+------------
relation | ExclusiveLock | t | t_lockmods
(1 row)
1: abort;
ABORT
-- 2.1.2 update | delete should hold RowExclusiveLock on result relations
1: begin;
BEGIN
1: update t_lockmods set c = c + 0;
UPDATE 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+------------------+---------+------------
relation | RowExclusiveLock | t | t_lockmods
(1 row)
1: abort;
ABORT
1: begin;
BEGIN
1: delete from t_lockmods;
DELETE 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+------------------+---------+------------
relation | RowExclusiveLock | t | t_lockmods
(1 row)
1: abort;
ABORT
-- 2.1.3 insert should hold RowExclusiveLock on result relations
1: begin;
BEGIN
1: insert into t_lockmods select * from generate_series(1, 5);
INSERT 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+------------------+---------+------------
relation | RowExclusiveLock | t | t_lockmods
(1 row)
1: abort;
ABORT
-- 2.1.4 use cached plan should be consistent with no cached plan
1: prepare select_for_update as select * from t_lockmods for update;
PREPARE
1: prepare update_tlockmods as update t_lockmods set c = c + 0;
PREPARE
1: prepare delete_tlockmods as delete from t_lockmods;
PREPARE
1: prepare insert_tlockmods as insert into t_lockmods select * from generate_series(1, 5);
PREPARE
1: begin;
BEGIN
1: execute select_for_update;
c
---
1
2
3
4
5
(5 rows)
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+---------------+---------+------------
relation | ExclusiveLock | t | t_lockmods
(1 row)
1: abort;
ABORT
1: begin;
BEGIN
1: execute update_tlockmods;
EXECUTE 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+------------------+---------+------------
relation | RowExclusiveLock | t | t_lockmods
(1 row)
1: abort;
ABORT
1: begin;
BEGIN
1: execute delete_tlockmods;
EXECUTE 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+------------------+---------+------------
relation | RowExclusiveLock | t | t_lockmods
(1 row)
1: abort;
ABORT
1: begin;
BEGIN
1: execute insert_tlockmods;
EXECUTE 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+------------------+---------+------------
relation | RowExclusiveLock | t | t_lockmods
(1 row)
1: abort;
ABORT
-- 2.2 test for AO table
-- 2.2.1 select for update should hold ExclusiveLock on range tables
1: begin;
BEGIN
1: select * from t_lockmods_ao for update;
c
---
1
3
5
8
6
7
2
4
(8 rows)
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+---------------+---------+---------------
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
ABORT
-- 2.2.2 update | delete should hold ExclusiveLock on result relations
1: begin;
BEGIN
1: update t_lockmods_ao set c = c + 0;
UPDATE 8
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+---------------+---------+---------------
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
ABORT
1: begin;
BEGIN
1: delete from t_lockmods_ao;
DELETE 8
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+---------------+---------+---------------
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
ABORT
-- 2.2.3 insert should hold RowExclusiveLock on result relations
1: begin;
BEGIN
1: insert into t_lockmods_ao select * from generate_series(1, 5);
INSERT 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+------------------+---------+---------------
relation | RowExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
ABORT
-- 2.2.4 use cached plan should be consistent with no cached plan
1: prepare select_for_update_ao as select * from t_lockmods_ao for update;
PREPARE
1: prepare update_tlockmods_ao as update t_lockmods_ao set c = c + 0;
PREPARE
1: prepare delete_tlockmods_ao as delete from t_lockmods_ao;
PREPARE
1: prepare insert_tlockmods_ao as insert into t_lockmods_ao select * from generate_series(1, 5);
PREPARE
1: begin;
BEGIN
1: execute select_for_update_ao;
c
---
2
4
1
3
5
8
6
7
(8 rows)
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+---------------+---------+---------------
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
ABORT
1: begin;
BEGIN
1: execute update_tlockmods_ao;
EXECUTE 8
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+---------------+---------+---------------
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
ABORT
1: begin;
BEGIN
1: execute delete_tlockmods_ao;
EXECUTE 8
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+---------------+---------+---------------
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
ABORT
1: begin;
BEGIN
1: execute insert_tlockmods_ao;
EXECUTE 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+------------------+---------+---------------
relation | RowExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
ABORT
1q: ... <quitting>
2q: ... <quitting>
-- start_ignore
! gpconfig -c gp_enable_global_deadlock_detector -v off;
20190401:16:37:42:003348 gpconfig:zlv:gpadmin-[INFO]:-completed successfully with parameters '-c gp_enable_global_deadlock_detector -v off'
! gpstop -rai;
20190401:16:37:42:003471 gpstop:zlv:gpadmin-[INFO]:-Starting gpstop with args: -rai
20190401:16:37:42:003471 gpstop:zlv:gpadmin-[INFO]:-Gathering information and validating the environment...
20190401:16:37:42:003471 gpstop:zlv:gpadmin-[INFO]:-Obtaining Greenplum Master catalog information
20190401:16:37:42:003471 gpstop:zlv:gpadmin-[INFO]:-Obtaining Segment details from master...
20190401:16:37:42:003471 gpstop:zlv:gpadmin-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 5.0.0-beta.3+dev.17653.g86b0b90 build dev'
20190401:16:37:42:003471 gpstop:zlv:gpadmin-[INFO]:-There are 0 connections to the database
20190401:16:37:42:003471 gpstop:zlv:gpadmin-[INFO]:-Commencing Master instance shutdown with mode='immediate'
20190401:16:37:42:003471 gpstop:zlv:gpadmin-[INFO]:-Master host=zlv
20190401:16:37:42:003471 gpstop:zlv:gpadmin-[INFO]:-Commencing Master instance shutdown with mode=immediate
20190401:16:37:42:003471 gpstop:zlv:gpadmin-[INFO]:-Master segment instance directory=/home/gpadmin/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1
20190401:16:37:42:003471 gpstop:zlv:gpadmin-[INFO]:-Attempting forceful termination of any leftover master process
20190401:16:37:42:003471 gpstop:zlv:gpadmin-[INFO]:-Terminating processes for segment /home/gpadmin/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1
20190401:16:37:42:003471 gpstop:zlv:gpadmin-[INFO]:-No standby master host configured
20190401:16:37:42:003471 gpstop:zlv:gpadmin-[INFO]:-Targeting dbid [2, 3, 4] for shutdown
20190401:16:37:42:003471 gpstop:zlv:gpadmin-[INFO]:-Commencing parallel segment instance shutdown, please wait...
20190401:16:37:42:003471 gpstop:zlv:gpadmin-[INFO]:-0.00% of jobs completed
20190401:16:37:43:003471 gpstop:zlv:gpadmin-[INFO]:-100.00% of jobs completed
20190401:16:37:43:003471 gpstop:zlv:gpadmin-[INFO]:-----------------------------------------------------
20190401:16:37:43:003471 gpstop:zlv:gpadmin-[INFO]:- Segments stopped successfully = 3
20190401:16:37:43:003471 gpstop:zlv:gpadmin-[INFO]:- Segments with errors during stop = 0
20190401:16:37:43:003471 gpstop:zlv:gpadmin-[INFO]:-----------------------------------------------------
20190401:16:37:43:003471 gpstop:zlv:gpadmin-[INFO]:-Successfully shutdown 3 of 3 segment instances
20190401:16:37:43:003471 gpstop:zlv:gpadmin-[INFO]:-Database successfully shutdown with no errors reported
20190401:16:37:43:003471 gpstop:zlv:gpadmin-[INFO]:-Cleaning up leftover gpmmon process
20190401:16:37:43:003471 gpstop:zlv:gpadmin-[INFO]:-No leftover gpmmon process found
20190401:16:37:43:003471 gpstop:zlv:gpadmin-[INFO]:-Cleaning up leftover gpsmon processes
20190401:16:37:43:003471 gpstop:zlv:gpadmin-[INFO]:-No leftover gpsmon processes on some hosts. not attempting forceful termination on these hosts
20190401:16:37:43:003471 gpstop:zlv:gpadmin-[INFO]:-Cleaning up leftover shared memory
20190401:16:37:43:003471 gpstop:zlv:gpadmin-[INFO]:-Restarting System...
-- end_ignore
1: show gp_enable_global_deadlock_detector;
gp_enable_global_deadlock_detector
------------------------------------
off
(1 row)
1q: ... <quitting>
test: lockmodes
test: ao_partition_lock
test: select_dropped_table
......
1: set optimizer = off;
create or replace view show_locks_lockmodes as
select locktype, mode, granted, relation::regclass
from pg_locks
where
gp_segment_id = -1 and
locktype = 'relation' and
relation::regclass::text like 't_lockmods%';
show gp_enable_global_deadlock_detector;
-- 1. The first part of test is with
-- gp_enable_global_deadlock_detector off
-- 1.1 test for heap tables
create table t_lockmods (c int) distributed randomly;
insert into t_lockmods select * from generate_series(1, 5);
-- 1.1.1 select for update should hold ExclusiveLock on range tables
1: begin;
1: select * from t_lockmods for update;
2: select * from show_locks_lockmodes;
1: abort;
-- 1.1.2 update | delete should hold ExclusiveLock on result relations
1: begin;
1: update t_lockmods set c = c + 0;
2: select * from show_locks_lockmodes;
1: abort;
1: begin;
1: delete from t_lockmods;
2: select * from show_locks_lockmodes;
1: abort;
-- 1.1.3 insert should hold RowExclusiveLock on result relations
1: begin;
1: insert into t_lockmods select * from generate_series(1, 5);
2: select * from show_locks_lockmodes;
1: abort;
-- 1.1.4 use cached plan should be consistent with no cached plan
1: prepare select_for_update as select * from t_lockmods for update;
1: prepare update_tlockmods as update t_lockmods set c = c + 0;
1: prepare delete_tlockmods as delete from t_lockmods;
1: prepare insert_tlockmods as insert into t_lockmods select * from generate_series(1, 5);
1: begin;
1: execute select_for_update;
2: select * from show_locks_lockmodes;
1: abort;
1: begin;
1: execute update_tlockmods;
2: select * from show_locks_lockmodes;
1: abort;
1: begin;
1: execute delete_tlockmods;
2: select * from show_locks_lockmodes;
1: abort;
1: begin;
1: execute insert_tlockmods;
2: select * from show_locks_lockmodes;
1: abort;
-- 1.2 test for AO table
create table t_lockmods_ao (c int) with (appendonly=true) distributed randomly;
insert into t_lockmods_ao select * from generate_series(1, 8);
-- 1.2.1 select for update should hold ExclusiveLock on range tables
1: begin;
1: select * from t_lockmods_ao for update;
2: select * from show_locks_lockmodes;
1: abort;
-- 1.2.2 update | delete should hold ExclusiveLock on result relations
1: begin;
1: update t_lockmods_ao set c = c + 0;
2: select * from show_locks_lockmodes;
1: abort;
1: begin;
1: delete from t_lockmods_ao;
2: select * from show_locks_lockmodes;
1: abort;
-- 1.2.3 insert should hold RowExclusiveLock on result relations
1: begin;
1: insert into t_lockmods_ao select * from generate_series(1, 5);
2: select * from show_locks_lockmodes;
1: abort;
-- 1.2.4 use cached plan should be consistent with no cached plan
1: prepare select_for_update_ao as select * from t_lockmods_ao for update;
1: prepare update_tlockmods_ao as update t_lockmods_ao set c = c + 0;
1: prepare delete_tlockmods_ao as delete from t_lockmods_ao;
1: prepare insert_tlockmods_ao as insert into t_lockmods_ao select * from generate_series(1, 5);
1: begin;
1: execute select_for_update_ao;
2: select * from show_locks_lockmodes;
1: abort;
1: begin;
1: execute update_tlockmods_ao;
2: select * from show_locks_lockmodes;
1: abort;
1: begin;
1: execute delete_tlockmods_ao;
2: select * from show_locks_lockmodes;
1: abort;
1: begin;
1: execute insert_tlockmods_ao;
2: select * from show_locks_lockmodes;
1: abort;
1q:
2q:
-- start_ignore
! gpconfig -c gp_enable_global_deadlock_detector -v on;
! gpstop -rai;
-- end_ignore
1: set optimizer = off;
2: show gp_enable_global_deadlock_detector;
-- 2. The second part of test is with
-- gp_enable_global_deadlock_detector on
-- 2.1 test for heap tables
-- 2.1.1 select for update should hold ExclusiveLock on range tables
1: begin;
1: select * from t_lockmods for update;
2: select * from show_locks_lockmodes;
1: abort;
-- 2.1.2 update | delete should hold RowExclusiveLock on result relations
1: begin;
1: update t_lockmods set c = c + 0;
2: select * from show_locks_lockmodes;
1: abort;
1: begin;
1: delete from t_lockmods;
2: select * from show_locks_lockmodes;
1: abort;
-- 2.1.3 insert should hold RowExclusiveLock on result relations
1: begin;
1: insert into t_lockmods select * from generate_series(1, 5);
2: select * from show_locks_lockmodes;
1: abort;
-- 2.1.4 use cached plan should be consistent with no cached plan
1: prepare select_for_update as select * from t_lockmods for update;
1: prepare update_tlockmods as update t_lockmods set c = c + 0;
1: prepare delete_tlockmods as delete from t_lockmods;
1: prepare insert_tlockmods as insert into t_lockmods select * from generate_series(1, 5);
1: begin;
1: execute select_for_update;
2: select * from show_locks_lockmodes;
1: abort;
1: begin;
1: execute update_tlockmods;
2: select * from show_locks_lockmodes;
1: abort;
1: begin;
1: execute delete_tlockmods;
2: select * from show_locks_lockmodes;
1: abort;
1: begin;
1: execute insert_tlockmods;
2: select * from show_locks_lockmodes;
1: abort;
-- 2.2 test for AO table
-- 2.2.1 select for update should hold ExclusiveLock on range tables
1: begin;
1: select * from t_lockmods_ao for update;
2: select * from show_locks_lockmodes;
1: abort;
-- 2.2.2 update | delete should hold ExclusiveLock on result relations
1: begin;
1: update t_lockmods_ao set c = c + 0;
2: select * from show_locks_lockmodes;
1: abort;
1: begin;
1: delete from t_lockmods_ao;
2: select * from show_locks_lockmodes;
1: abort;
-- 2.2.3 insert should hold RowExclusiveLock on result relations
1: begin;
1: insert into t_lockmods_ao select * from generate_series(1, 5);
2: select * from show_locks_lockmodes;
1: abort;
-- 2.2.4 use cached plan should be consistent with no cached plan
1: prepare select_for_update_ao as select * from t_lockmods_ao for update;
1: prepare update_tlockmods_ao as update t_lockmods_ao set c = c + 0;
1: prepare delete_tlockmods_ao as delete from t_lockmods_ao;
1: prepare insert_tlockmods_ao as insert into t_lockmods_ao select * from generate_series(1, 5);
1: begin;
1: execute select_for_update_ao;
2: select * from show_locks_lockmodes;
1: abort;
1: begin;
1: execute update_tlockmods_ao;
2: select * from show_locks_lockmodes;
1: abort;
1: begin;
1: execute delete_tlockmods_ao;
2: select * from show_locks_lockmodes;
1: abort;
1: begin;
1: execute insert_tlockmods_ao;
2: select * from show_locks_lockmodes;
1: abort;
1q:
2q:
-- start_ignore
! gpconfig -c gp_enable_global_deadlock_detector -v off;
! gpstop -rai;
-- end_ignore
1: show gp_enable_global_deadlock_detector;
1q:
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册