try last block for insert only when nobody holds locks on it.

Offering: openGaussDev

More detail: try last block for insert only when nobody holds locks on it.

Signed-off-by: Liu Rong <liurong25@huawei.com>

Match-id-d4b2af2c21d441ce9d2dcd6205adf5c2f4aa0581
This commit is contained in:
openGaussDev 2022-03-07 17:30:20 +08:00 committed by yanghao
parent 794f5e1dde
commit 8b5d92a0e2
4 changed files with 97 additions and 2 deletions

View File

@ -382,6 +382,15 @@ Buffer RelationGetBufferForTuple(Relation relation, Size len, Buffer other_buffe
Size extralen = 0;
HeapPageHeader phdr;
/*
* Blocks that were extended one by one, unlike bulk-extended blocks, are
* not recorded into the FSM. Once the creator session closes the relation,
* they cannot be found by anyone else. This is especially noticeable for
* partition bulk insert. So, if no available space is found in the FSM, we
* check the last block to reuse the 'leaked free space' mentioned earlier.
*/
bool test_last_block = false;
len = MAXALIGN(len); /* be conservative */
/* Bulk insert is not supported for updates, only inserts. */
@ -482,7 +491,14 @@ loop:
if (PageIsAllVisible(BufferGetPage(buffer))) {
visibilitymap_pin(relation, target_block, vmbuffer);
}
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
if (!TryLockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE, !test_last_block)) {
Assert(test_last_block);
ReleaseBuffer(buffer);
/* someone is using this block, give up and extend a new one. */
break;
}
} else if (other_block == target_block) {
/* also easy case */
buffer = other_buffer;
@ -574,6 +590,21 @@ loop:
ereport(DEBUG5, (errmodule(MOD_SEGMENT_PAGE),
errmsg("RelationGetBufferForTuple, get target block %u from FSM, nblocks in relation is %u",
target_block, smgrnblocks(relation->rd_smgr, MAIN_FORKNUM))));
/*
* If the FSM knows nothing of the rel, try the last page before we
* give up and extend. This is intended to reuse pages that were extended
* one by one and therefore never recorded in the FSM.
*
* Ideally, bulk-extend would record all pages into the FSM later on.
*/
if (target_block == InvalidBlockNumber && !test_last_block && other_buffer == InvalidBuffer) {
BlockNumber nblocks = RelationGetNumberOfBlocks(relation);
if (nblocks > 0) {
target_block = nblocks - 1;
}
test_last_block = true;
}
}
/*

View File

@ -52,6 +52,15 @@ Buffer RelationGetBufferForUTuple(Relation relation, Size len, Buffer otherBuffe
BlockNumber otherBlock;
bool needLock = false;
/*
* Blocks that were extended one by one, unlike bulk-extended blocks, are
* not recorded into the FSM. Once the creator session closes the relation,
* they cannot be found by anyone else. This is especially noticeable for
* partition bulk insert. So, if no available space is found in the FSM, we
* check the last block to reuse the 'leaked free space' mentioned earlier.
*/
bool test_last_block = false;
len = SHORTALIGN(len);
/*
* If we're gonna fail for oversize tuple, do it right away
@ -145,7 +154,13 @@ loop:
if (otherBuffer == InvalidBuffer) {
/* easy case */
buffer = ReadBufferBI(relation, targetBlock, bistate);
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
if (!TryLockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE, !test_last_block)) {
Assert(test_last_block);
ReleaseBuffer(buffer);
/* someone is using this block, give up and extend. */
break;
}
} else if (otherBlock == targetBlock) {
buffer = otherBuffer;
/* also easy case */
@ -210,6 +225,21 @@ loop:
* to try.
*/
targetBlock = RecordAndGetPageWithFreeSpace(relation, targetBlock, pageFreeSpace, len + saveFreeSpace);
/*
* If the FSM knows nothing of the rel, try the last page before we
* give up and extend. This is intended to reuse pages that were extended
* one by one and therefore never recorded in the FSM.
*
* Ideally, bulk-extend would record all pages into the FSM later on.
*/
if (targetBlock == InvalidBlockNumber && !test_last_block && otherBuffer == InvalidBuffer) {
BlockNumber nblocks = RelationGetNumberOfBlocks(relation);
if (nblocks > 0) {
targetBlock = nblocks - 1;
}
test_last_block = true;
}
}
/*

View File

@ -5545,6 +5545,39 @@ void LockBuffer(Buffer buffer, int mode)
}
}
/*
 * TryLockBuffer
 *	Acquire the buffer's content_lock in the requested mode, or give up.
 *
 * When must_wait is true this degenerates to a plain LockBuffer() call and
 * always returns true.  When must_wait is false the lock is attempted just
 * once: if it is not immediately available we return FALSE with no
 * side-effects.  An unrecognized mode raises an ERROR.
 */
bool TryLockBuffer(Buffer buffer, int mode, bool must_wait)
{
    Assert(BufferIsValid(buffer));

    /* caller asked for blocking behavior: behave exactly like LockBuffer */
    if (must_wait) {
        LockBuffer(buffer, mode);
        return true;
    }

    /* local buffers are private to this backend and need no lock */
    if (BufferIsLocal(buffer)) {
        return true;
    }

    volatile BufferDesc *desc = GetBufferDescriptor(buffer - 1);
    switch (mode) {
        case BUFFER_LOCK_SHARE:
            return LWLockConditionalAcquire(desc->content_lock, LW_SHARED);
        case BUFFER_LOCK_EXCLUSIVE:
            return LWLockConditionalAcquire(desc->content_lock, LW_EXCLUSIVE);
        default:
            ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH),
                (errmsg("unrecognized buffer lock mode for TryLockBuffer: %d", mode))));
    }
    /* not reached; keeps the compiler happy about the return path */
    return false;
}
/*
* Acquire the content_lock for the buffer, but only if we don't have to wait.
*

View File

@ -321,6 +321,7 @@ extern void MarkBufferDirtyHint(Buffer buffer, bool buffer_std);
extern void FlushOneBuffer(Buffer buffer);
extern void UnlockBuffers(void);
extern void LockBuffer(Buffer buffer, int mode);
extern bool TryLockBuffer(Buffer buffer, int mode, bool must_wait);
extern bool ConditionalLockBuffer(Buffer buffer);
extern void LockBufferForCleanup(Buffer buffer);
extern bool ConditionalLockBufferForCleanup(Buffer buffer);