diff --git a/src/gausskernel/storage/access/heap/hio.cpp b/src/gausskernel/storage/access/heap/hio.cpp index 5a556c096..0061ab0b4 100644 --- a/src/gausskernel/storage/access/heap/hio.cpp +++ b/src/gausskernel/storage/access/heap/hio.cpp @@ -382,6 +382,15 @@ Buffer RelationGetBufferForTuple(Relation relation, Size len, Buffer other_buffe Size extralen = 0; HeapPageHeader phdr; + /* + * Blocks that are extended one by one are different from bulk-extend blocks, and + * are not recorded into the FSM. Once the creator session closes this relation, they + * can not be used by any other session. It is especially obvious for partition + * bulk insert. Here, if nothing available is found in the FSM, we check the last block to + * reuse the 'leaked free space' mentioned earlier. + */ + bool test_last_block = false; + len = MAXALIGN(len); /* be conservative */ /* Bulk insert is not supported for updates, only inserts. */ @@ -482,7 +491,14 @@ loop: if (PageIsAllVisible(BufferGetPage(buffer))) { visibilitymap_pin(relation, target_block, vmbuffer); } - LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); + + if (!TryLockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE, !test_last_block)) { + Assert(test_last_block); + ReleaseBuffer(buffer); + + /* someone is using this block, give up and extend a new one. */ + break; + } } else if (other_block == target_block) { /* also easy case */ buffer = other_buffer; @@ -574,6 +590,21 @@ loop: ereport(DEBUG5, (errmodule(MOD_SEGMENT_PAGE), errmsg("RelationGetBufferForTuple, get target block %u from FSM, nblocks in relation is %u", target_block, smgrnblocks(relation->rd_smgr, MAIN_FORKNUM)))); + + /* + * If the FSM knows nothing of the rel, try the last page before we + * give up and extend. This is intended to reuse, as much as possible, pages + * that were extended one by one and never recorded in the FSM. + * + * The best long-term fix is to record all pages into the FSM using bulk-extend later. 
+ */ + if (target_block == InvalidBlockNumber && !test_last_block && other_buffer == InvalidBuffer) { + BlockNumber nblocks = RelationGetNumberOfBlocks(relation); + if (nblocks > 0) { + target_block = nblocks - 1; + } + test_last_block = true; + } } /* diff --git a/src/gausskernel/storage/access/ustore/knl_uhio.cpp b/src/gausskernel/storage/access/ustore/knl_uhio.cpp index cfc60b99e..586c98038 100644 --- a/src/gausskernel/storage/access/ustore/knl_uhio.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uhio.cpp @@ -52,6 +52,15 @@ Buffer RelationGetBufferForUTuple(Relation relation, Size len, Buffer otherBuffe BlockNumber otherBlock; bool needLock = false; + /* + * Blocks that are extended one by one are different from bulk-extend blocks, and + * are not recorded into the FSM. Once the creator session closes this relation, they + * can not be used by any other session. It is especially obvious for partition + * bulk insert. Here, if nothing available is found in the FSM, we check the last block to + * reuse the 'leaked free space' mentioned earlier. + */ + bool test_last_block = false; + len = SHORTALIGN(len); /* * If we're gonna fail for oversize tuple, do it right away @@ -145,7 +154,13 @@ loop: if (otherBuffer == InvalidBuffer) { /* easy case */ buffer = ReadBufferBI(relation, targetBlock, bistate); - LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); + if (!TryLockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE, !test_last_block)) { + Assert(test_last_block); + ReleaseBuffer(buffer); + + /* someone is using this block, give up and extend. */ + break; + } } else if (otherBlock == targetBlock) { buffer = otherBuffer; /* also easy case */ @@ -210,6 +225,21 @@ * to try. */ targetBlock = RecordAndGetPageWithFreeSpace(relation, targetBlock, pageFreeSpace, len + saveFreeSpace); + + /* + * If the FSM knows nothing of the rel, try the last page before we + * give up and extend. This is intended to reuse, as much as possible, pages + * that were extended one by one and never recorded in the FSM. 
+ * + * The best long-term fix is to record all pages into the FSM using bulk-extend later. + */ + if (targetBlock == InvalidBlockNumber && !test_last_block && otherBuffer == InvalidBuffer) { + BlockNumber nblocks = RelationGetNumberOfBlocks(relation); + if (nblocks > 0) { + targetBlock = nblocks - 1; + } + test_last_block = true; + } } /* diff --git a/src/gausskernel/storage/buffer/bufmgr.cpp b/src/gausskernel/storage/buffer/bufmgr.cpp index 3fbd4525c..121dc1e97 100644 --- a/src/gausskernel/storage/buffer/bufmgr.cpp +++ b/src/gausskernel/storage/buffer/bufmgr.cpp @@ -5545,6 +5545,39 @@ void LockBuffer(Buffer buffer, int mode) } } + +/* + * Try to acquire the content_lock for the buffer; if must_wait is true this + * acts exactly like LockBuffer(). On contention, return false with no side-effects. + */ +bool TryLockBuffer(Buffer buffer, int mode, bool must_wait) +{ + Assert(BufferIsValid(buffer)); + + /* caller is willing to wait: behave exactly as LockBuffer */ + if (must_wait) { + LockBuffer(buffer, mode); + return true; + } + + /* local buffers are session-private and need no content lock */ + if (BufferIsLocal(buffer)) { + return true; + } + + volatile BufferDesc *buf = GetBufferDescriptor(buffer - 1); + + if (mode == BUFFER_LOCK_SHARE) { + return LWLockConditionalAcquire(buf->content_lock, LW_SHARED); + } else if (mode == BUFFER_LOCK_EXCLUSIVE) { + return LWLockConditionalAcquire(buf->content_lock, LW_EXCLUSIVE); + } else { + ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), + (errmsg("unrecognized buffer lock mode for TryLockBuffer: %d", mode)))); + } + + return false; /* not reached: ereport(ERROR) above does not return */ +} + /* * Acquire the content_lock for the buffer, but only if we don't have to wait. 
* diff --git a/src/include/storage/buf/bufmgr.h b/src/include/storage/buf/bufmgr.h index 9898aca37..a2d1f3f3c 100644 --- a/src/include/storage/buf/bufmgr.h +++ b/src/include/storage/buf/bufmgr.h @@ -321,6 +321,7 @@ extern void MarkBufferDirtyHint(Buffer buffer, bool buffer_std); extern void FlushOneBuffer(Buffer buffer); extern void UnlockBuffers(void); extern void LockBuffer(Buffer buffer, int mode); +extern bool TryLockBuffer(Buffer buffer, int mode, bool must_wait); extern bool ConditionalLockBuffer(Buffer buffer); extern void LockBufferForCleanup(Buffer buffer); extern bool ConditionalLockBufferForCleanup(Buffer buffer);