!2665 indexscan optimization

Merge pull request !2665 from ZYM/dev
opengauss-bot 2022-12-24 02:16:37 +00:00 committed by Gitee
commit 63bb0fd266
No known key found for this signature in database
GPG Key ID: 173E9B9CA92EEF8F
16 changed files with 198 additions and 90 deletions

View File

@@ -829,4 +829,5 @@ cost_weight_index|real|1e-10,1e+10|NULL|NULL|
default_limit_rows|real|-100,1.79769e+308|NULL|NULL|
enable_auto_explain|bool|0,0|NULL|NULL|
auto_explain_level|enum|off,log,notice|NULL|NULL|
enable_indexscan_optimization|bool|0,0|NULL|NULL|
[end]

View File

@@ -1318,6 +1318,17 @@ static void InitSqlConfigureNamesBool()
NULL,
NULL,
NULL},
{{"enable_indexscan_optimization",
PGC_USERSET,
NODE_ALL,
UNGROUPED,
gettext_noop("Enables indexscan optimization."),
NULL},
&u_sess->attr.attr_common.enable_indexscan_optimization,
false,
NULL,
NULL,
NULL},
#ifndef ENABLE_MULTIPLE_NODES
{{"enable_beta_opfusion",

View File

@@ -31,7 +31,7 @@
* inside an EvalPlanQual recheck. If we aren't, just execute
* the access method's next-tuple routine.
*/
static TupleTableSlot* ExecScanFetch(ScanState* node, ExecScanAccessMtd access_mtd, ExecScanRecheckMtd recheck_mtd)
static inline TupleTableSlot* ExecScanFetch(ScanState* node, ExecScanAccessMtd access_mtd, ExecScanRecheckMtd recheck_mtd)
{
EState* estate = node->ps.state;

View File

@@ -373,11 +373,13 @@ TupleTableSlot* ExecStoreTuple(Tuple tuple, TupleTableSlot* slot, Buffer buffer,
Assert(slot != NULL);
Assert(slot->tts_tupleDescriptor != NULL);
HeapTuple htup = (HeapTuple)tuple;
if (TTS_TABLEAM_IS_USTORE(slot) && htup->tupTableType == HEAP_TUPLE) {
tuple = (Tuple)HeapToUHeap(slot->tts_tupleDescriptor, (HeapTuple)tuple);
} else if (TTS_TABLEAM_IS_HEAP(slot) && htup->tupTableType == UHEAP_TUPLE) {
tuple = (Tuple)UHeapToHeap(slot->tts_tupleDescriptor, (UHeapTuple)tuple);
if (!u_sess->attr.attr_common.enable_indexscan_optimization) {
HeapTuple htup = (HeapTuple)tuple;
if (TTS_TABLEAM_IS_USTORE(slot) && htup->tupTableType == HEAP_TUPLE) {
tuple = (Tuple)HeapToUHeap(slot->tts_tupleDescriptor, (HeapTuple)tuple);
} else if (TTS_TABLEAM_IS_HEAP(slot) && htup->tupTableType == UHEAP_TUPLE) {
tuple = (Tuple)UHeapToHeap(slot->tts_tupleDescriptor, (UHeapTuple)tuple);
}
}
tableam_tslot_store_tuple(tuple, slot, buffer, should_free, false);

View File

@@ -119,7 +119,6 @@ static TupleTableSlot* IndexOnlyNext(IndexOnlyScanState* node)
TupleTableSlot* slot = NULL;
TupleTableSlot* tmpslot = NULL;
ItemPointer tid;
bool isVersionScan = TvIsVersionScan(&node->ss);
bool isUHeap = false;
/*
@@ -138,8 +137,11 @@ static TupleTableSlot* IndexOnlyNext(IndexOnlyScanState* node)
econtext = node->ss.ps.ps_ExprContext;
slot = node->ss.ss_ScanTupleSlot;
isUHeap = RelationIsUstoreFormat(node->ss.ss_currentRelation);
tmpslot = MakeSingleTupleTableSlot(RelationGetDescr(scandesc->heapRelation),
if (isUHeap) {
tmpslot = MakeSingleTupleTableSlot(RelationGetDescr(scandesc->heapRelation),
false, scandesc->heapRelation->rd_tam_ops);
}
/*
* OK, now that we have what we need, fetch the next tuple.
@@ -168,11 +170,13 @@ static TupleTableSlot* IndexOnlyNext(IndexOnlyScanState* node)
* reading the TID; and (2) is satisfied by the acquisition of the
* buffer content lock in order to insert the TID.
*/
if (!ExecGPIGetNextPartRelation(node, indexScan)) {
continue;
}
if (!ExecCBIFixHBktRel(scandesc, &node->ioss_VMBuffer)) {
continue;
if (!u_sess->attr.attr_common.enable_indexscan_optimization) {
if (!ExecGPIGetNextPartRelation(node, indexScan)) {
continue;
}
if (!ExecCBIFixHBktRel(scandesc, &node->ioss_VMBuffer)) {
continue;
}
}
if (isUHeap) {
@@ -186,14 +190,13 @@ static TupleTableSlot* IndexOnlyNext(IndexOnlyScanState* node)
continue; /* the visible version not match the IndexTuple */
}
}
} else if (isVersionScan ||
!visibilitymap_test(indexScan->heapRelation, ItemPointerGetBlockNumber(tid), &node->ioss_VMBuffer)) {
} else if (!visibilitymap_test(indexScan->heapRelation, ItemPointerGetBlockNumber(tid), &node->ioss_VMBuffer)) {
/* IMPORTANT: We ALWAYS visit the heap to check visibility in VERSION SCAN. */
/*
* Rats, we have to visit the heap to check visibility.
*/
node->ioss_HeapFetches++;
if (!IndexFetchSlot(indexScan, slot, isUHeap)) {
if (IndexFetchTuple(indexScan) == NULL) {
#ifdef DEBUG_INPLACE
/* Now ustore does not support hash bucket table */
Assert(indexScan == scandesc);
@@ -205,6 +208,7 @@ static TupleTableSlot* IndexOnlyNext(IndexOnlyScanState* node)
#endif
continue; /* no visible tuple, try next index entry */
}
#ifdef DEBUG_INPLACE
Assert(indexScan == scandesc);
@@ -262,7 +266,9 @@ static TupleTableSlot* IndexOnlyNext(IndexOnlyScanState* node)
*/
if (tuple == NULL)
PredicateLockPage(indexScan->heapRelation, ItemPointerGetBlockNumber(tid), estate->es_snapshot);
ExecDropSingleTupleTableSlot(tmpslot);
if (isUHeap) {
ExecDropSingleTupleTableSlot(tmpslot);
}
return slot;
}
@@ -270,7 +276,9 @@ static TupleTableSlot* IndexOnlyNext(IndexOnlyScanState* node)
* if we get here it means the index scan failed so we are at the end of
* the scan..
*/
ExecDropSingleTupleTableSlot(tmpslot);
if (isUHeap) {
ExecDropSingleTupleTableSlot(tmpslot);
}
return ExecClearTuple(slot);
}
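
Note on the hunks above: tmpslot used to be created unconditionally on every IndexOnlyNext() call and dropped on both exit paths, yet it is only ever consumed on the ustore branch, so heap-format scans paid for a slot they never touched. The change makes the slot's whole lifecycle conditional, roughly:

    /* Conditional slot lifecycle (names from the diff; the scan loop is
     * elided). Heap-format scans no longer construct or destroy the slot. */
    TupleTableSlot* tmpslot = NULL;
    if (isUHeap)
        tmpslot = MakeSingleTupleTableSlot(RelationGetDescr(scandesc->heapRelation),
            false, scandesc->heapRelation->rd_tam_ops);
    /* ... fetch tuples ... */
    if (isUHeap)
        ExecDropSingleTupleTableSlot(tmpslot);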

View File

@@ -81,7 +81,9 @@ static TupleTableSlot* IndexNext(IndexScanState* node)
econtext = node->ss.ps.ps_ExprContext;
slot = node->ss.ss_ScanTupleSlot;
isUstore = RelationIsUstoreFormat(node->ss.ss_currentRelation);
if (!u_sess->attr.attr_common.enable_indexscan_optimization) {
isUstore = RelationIsUstoreFormat(node->ss.ss_currentRelation);
}
/*
* ok, now that we have what we need, fetch the next tuple.
@@ -108,10 +110,14 @@ static TupleTableSlot* IndexNext(IndexScanState* node)
* Note: we pass 'false' because tuples returned by amgetnext are
* pointers onto disk pages and must not be pfree_ext()'d.
*/
(void)ExecStoreTuple(tuple, /* tuple to store */
slot, /* slot to store in */
indexScan->xs_cbuf, /* buffer containing tuple */
false); /* don't pfree */
if (!u_sess->attr.attr_common.enable_indexscan_optimization)
(void)ExecStoreTuple(tuple, /* tuple to store */
slot, /* slot to store in */
indexScan->xs_cbuf, /* buffer containing tuple */
false); /* don't pfree */
else {
heap_slot_store_heap_tuple(tuple, slot, indexScan->xs_cbuf, false, false);
}
}
/*

View File

@@ -311,9 +311,9 @@ Datum nocache_index_getattr(IndexTuple tup, uint32 attnum, TupleDesc tuple_desc)
* cached offsets for these attrs.
*/
if (IndexTupleHasVarwidths(tup)) {
uint32 j;
int j;
for (j = 0; j <= attnum; j++) {
for (j = 0; j <= (int)attnum; j++) {
if (TupleDescAttr(tuple_desc, j)->attlen <= 0) {
slow = true;
break;
@@ -323,8 +323,8 @@ Datum nocache_index_getattr(IndexTuple tup, uint32 attnum, TupleDesc tuple_desc)
}
if (!slow) {
uint32 natts = tuple_desc->natts;
uint32 j = 1;
int natts = tuple_desc->natts;
int j = 1;
/*
* If we get here, we have a tuple with no nulls or var-widths up to
@@ -350,19 +350,19 @@ Datum nocache_index_getattr(IndexTuple tup, uint32 attnum, TupleDesc tuple_desc)
if (attr->attlen <= 0)
break;
off = att_align_nominal((uint32)off, attr->attalign);
off = att_align_nominal(off, attr->attalign);
attr->attcacheoff = off;
off += attr->attlen;
}
Assert(j > attnum);
Assert(j > (int)attnum);
off = TupleDescAttr(tuple_desc, attnum)->attcacheoff;
} else {
bool usecache = true;
uint32 i;
int i;
/*
* Now we know that we have to walk the tuple CAREFULLY. But we still
@@ -393,21 +393,21 @@ Datum nocache_index_getattr(IndexTuple tup, uint32 attnum, TupleDesc tuple_desc)
* no pad bytes in any case: then the offset will be valid for
* either an aligned or unaligned value.
*/
if (usecache && (uintptr_t)(off) == att_align_nominal((uint32)off, att->attalign))
if (usecache && (uintptr_t)(off) == att_align_nominal(off, att->attalign))
att->attcacheoff = off;
else {
off = att_align_pointer((uint32)off, att->attalign, -1, tp + off);
off = att_align_pointer(off, att->attalign, -1, tp + off);
usecache = false;
}
} else {
/* not varlena, so safe to use att_align_nominal */
off = att_align_nominal((uint32)off, att->attalign);
off = att_align_nominal(off, att->attalign);
if (usecache)
att->attcacheoff = off;
}
if (i == attnum)
if (i == (int)attnum)
break;
off = att_addlength_pointer(off, att->attlen, tp + off);

View File

@@ -2413,7 +2413,8 @@ bool heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer, S
bool at_chain_start = false;
bool valid = false;
bool skip = false;
TransactionId oldestXmin;
TransactionId oldestXmin = InvalidOid;
bool needOldestXmin = true;
/* If this is not the first call, previous call returned a (live!) tuple */
if (all_dead != NULL) {
@@ -2426,8 +2427,6 @@ bool heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer, S
skip = !first_call;
Assert(TransactionIdIsValid(u_sess->utils_cxt.RecentGlobalXmin));
oldestXmin = GetOldestXminForHot(relation);
Assert(BufferGetBlockNumber(buffer) == blkno);
HeapTupleCopyBaseFromPage(heap_tuple, dp);
@@ -2540,8 +2539,14 @@ bool heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer, S
* request, check whether all chain members are dead to all
* transactions.
*/
if (all_dead && *all_dead && !HeapTupleIsSurelyDead(heap_tuple, oldestXmin)) {
*all_dead = false;
if (all_dead && *all_dead) {
if (needOldestXmin) {
oldestXmin = GetOldestXminForHot(relation);
needOldestXmin = false;
}
if (!HeapTupleIsSurelyDead(heap_tuple, oldestXmin)) {
*all_dead = false;
}
}
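
The hunks above make the oldest-xmin lookup lazy: GetOldestXminForHot(relation) was previously called once per heap_hot_search_buffer() call, but its result is only consumed by the HeapTupleIsSurelyDead() test, so it is now computed on first use and cached for the rest of the HOT-chain walk. (The InvalidOid initializer works because InvalidOid and InvalidTransactionId are both zero, though InvalidTransactionId would be the cleaner spelling.) The pattern in isolation:

    /* Lazy, compute-once initialization (illustrative): */
    TransactionId oldestXmin = InvalidTransactionId;
    bool needOldestXmin = true;
    /* ... per chain member ... */
    if (all_dead && *all_dead) {
        if (needOldestXmin) {
            oldestXmin = GetOldestXminForHot(relation); /* first use only */
            needOldestXmin = false;
        }
        if (!HeapTupleIsSurelyDead(heap_tuple, oldestXmin))
            *all_dead = false;
    }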
/*

View File

@@ -1040,7 +1040,7 @@ static bool HeapTupleSatisfiesMVCC(HeapTuple htup, Snapshot snapshot, Buffer buf
* Show any tuples including dirty ones when u_sess->attr.attr_storage.enable_show_any_tuples is true.
* GUC param u_sess->attr.attr_storage.enable_show_any_tuples is just for analyse or maintenance
*/
if (u_sess->attr.attr_common.XactReadOnly && u_sess->attr.attr_storage.enable_show_any_tuples)
if (u_sess->attr.attr_storage.enable_show_any_tuples && u_sess->attr.attr_common.XactReadOnly)
return true;
if (!HeapTupleHeaderXminCommitted(tuple)) {
@@ -1851,7 +1851,7 @@ static bool HeapTupleSatisfiesDecodeMVCC(HeapTuple htup, Snapshot snapshot, Buff
* Show any tuples including dirty ones when u_sess->attr.attr_storage.enable_show_any_tuples is true.
* GUC param u_sess->attr.attr_storage.enable_show_any_tuples is just for analyse or maintenance
*/
if (u_sess->attr.attr_common.XactReadOnly && u_sess->attr.attr_storage.enable_show_any_tuples)
if (u_sess->attr.attr_storage.enable_show_any_tuples && u_sess->attr.attr_common.XactReadOnly)
return true;
bool getVisibility = false;

View File

@@ -67,6 +67,7 @@
#include "postgres.h"
#include "knl/knl_variable.h"
#include "access/nbtree.h"
#include "access/relscan.h"
#include "access/transam.h"
#include "access/tableam.h"
@@ -334,7 +335,7 @@ void index_rescan(IndexScanDesc scan, ScanKey keys, int nkeys, ScanKey orderbys,
Assert(norderbys == scan->numberOfOrderBys);
/* Release resources (like buffer pins) from table accesses */
if (scan->xs_heapfetch)
if (!u_sess->attr.attr_common.enable_indexscan_optimization && scan->xs_heapfetch)
tableam_scan_index_fetch_reset(scan->xs_heapfetch);
/* Release any held pin on a heap page */
@@ -433,7 +434,7 @@ void index_restrpos(IndexScanDesc scan)
GET_SCAN_PROCEDURE(amrestrpos);
/* Release resources (like buffer pins) from table accesses */
if (scan->xs_heapfetch)
if (!u_sess->attr.attr_common.enable_indexscan_optimization && scan->xs_heapfetch)
tableam_scan_index_fetch_reset(scan->xs_heapfetch);
scan->xs_continue_hot = false;
@@ -466,7 +467,10 @@ ItemPointer index_getnext_tid(IndexScanDesc scan, ScanDirection direction)
* scan->xs_recheck and possibly scan->xs_itup, though we pay no attention
* to those fields here.
*/
found = DatumGetBool(FunctionCall2(procedure, PointerGetDatum(scan), Int32GetDatum(direction)));
if (!u_sess->attr.attr_common.enable_indexscan_optimization)
found = DatumGetBool(FunctionCall2(procedure, PointerGetDatum(scan), Int32GetDatum(direction)));
else
found = _bt_gettuple_internal(scan, direction);
/* Reset kill flag immediately for safety */
scan->kill_prior_tuple = false;
@@ -474,8 +478,9 @@ ItemPointer index_getnext_tid(IndexScanDesc scan, ScanDirection direction)
/* If we're out of index entries, we're done */
if (!found) {
/* Release resources (like buffer pins) from table accesses */
if (scan->xs_heapfetch)
if (!u_sess->attr.attr_common.enable_indexscan_optimization && scan->xs_heapfetch) {
tableam_scan_index_fetch_reset(scan->xs_heapfetch);
}
/* ... but first, release any held pin on a heap page */
if (BufferIsValid(scan->xs_cbuf)) {
ReleaseBuffer(scan->xs_cbuf);
@@ -524,8 +529,10 @@ Tuple IndexFetchTuple(IndexScanDesc scan)
bool all_dead = false;
Tuple fetchedTuple = NULL;
fetchedTuple = tableam_scan_index_fetch_tuple(scan, &all_dead);
if (!u_sess->attr.attr_common.enable_indexscan_optimization)
fetchedTuple = tableam_scan_index_fetch_tuple(scan, &all_dead);
else
fetchedTuple = (Tuple)heapam_index_fetch_tuple(scan, &all_dead);
if (fetchedTuple) {
pgstat_count_heap_fetch(scan->indexRelation);
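
Both hunks in this file trade an indirect dispatch for a direct call when the switch is on: index_getnext_tid() calls _bt_gettuple_internal() instead of going through the fmgr trampoline, and IndexFetchTuple() calls heapam_index_fetch_tuple() instead of the tableam dispatch table. What that avoids, side by side (illustrative):

    /* indirect: Datum boxing plus a call through a function pointer,
     * opaque to the optimizer */
    found = DatumGetBool(FunctionCall2(procedure, PointerGetDatum(scan),
                                       Int32GetDatum(direction)));
    /* direct: an ordinary call the compiler can inline */
    found = _bt_gettuple_internal(scan, direction);

Note that the fast path hard-wires btree and heap storage, which suggests the switch is meant to be enabled only for plain-heap btree workloads.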

View File

@@ -426,12 +426,18 @@ int32 _bt_compare(Relation rel, int keysz, ScanKey scankey, Page page, OffsetNum
if (likely((!(scankey->sk_flags & SK_ISNULL)) && !isNull)) {
/* btint4cmp */
if (scankey->sk_func.fn_oid == BTINT4CMP_OID) {
if (scankey->sk_func.fn_oid == 351) { // F_BTINT4CMP
if ((int32)datum != (int32)scankey->sk_argument) {
result = ((int32)datum > (int32)scankey->sk_argument) ? 1 : -1;
} else {
continue;
}
} else if (scankey->sk_func.fn_oid == 2189) { // F_BTINT84CMP
if ((int64)datum != (int64)scankey->sk_argument) {
result = ((int64)datum > (int64)scankey->sk_argument) ? 1 : -1;
} else {
continue;
}
} else {
result = DatumGetInt32(
FunctionCall2Coll(&scankey->sk_func, scankey->sk_collation, datum, scankey->sk_argument));
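
The fast path above special-cases btint4cmp (OID 351) and btint84cmp (OID 2189): int4 and int8 are pass-by-value, so the Datum and sk_argument hold the integer values themselves, a three-way comparison can run inline, equal attributes continue straight to the next key, and only unrecognized comparators still pay for FunctionCall2Coll. The comparison in isolation (a sketch, assuming pointer-sized pass-by-value Datums as in PostgreSQL-family code):

    /* Inline three-way compare for an int4 scan key: <0, 0, >0. */
    static inline int bt_int4_cmp_fast(Datum datum, Datum arg)
    {
        int32 a = (int32)datum;
        int32 b = (int32)arg;
        return (a != b) ? ((a > b) ? 1 : -1) : 0;
    }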
@@ -959,11 +965,14 @@ bool _bt_first(IndexScanDesc scan, ScanDirection dir)
if (scan->xs_want_itup) {
scan->xs_itup = (IndexTuple)(so->currTuples + currItem->tupleOffset);
}
if (scan->xs_want_ext_oid && GPIScanCheckPartOid(scan->xs_gpi_scan, currItem->partitionOid)) {
GPISetCurrPartOid(scan->xs_gpi_scan, currItem->partitionOid);
}
if (scan->xs_want_bucketid && cbi_scan_need_change_bucket(scan->xs_cbi_scan, currItem->bucketid)) {
cbi_set_bucketid(scan->xs_cbi_scan, currItem->bucketid);
if (!u_sess->attr.attr_common.enable_indexscan_optimization) {
if (scan->xs_want_ext_oid && GPIScanCheckPartOid(scan->xs_gpi_scan, currItem->partitionOid)) {
GPISetCurrPartOid(scan->xs_gpi_scan, currItem->partitionOid);
}
if (scan->xs_want_bucketid && cbi_scan_need_change_bucket(scan->xs_cbi_scan, currItem->bucketid)) {
cbi_set_bucketid(scan->xs_cbi_scan, currItem->bucketid);
}
}
return true;
@@ -1020,12 +1029,13 @@ bool _bt_next(IndexScanDesc scan, ScanDirection dir)
if (scan->xs_want_itup)
scan->xs_itup = (IndexTuple)(so->currTuples + currItem->tupleOffset);
if (scan->xs_want_ext_oid && GPIScanCheckPartOid(scan->xs_gpi_scan, currItem->partitionOid)) {
GPISetCurrPartOid(scan->xs_gpi_scan, currItem->partitionOid);
}
if (scan->xs_want_bucketid && cbi_scan_need_change_bucket(scan->xs_cbi_scan, currItem->bucketid)) {
cbi_set_bucketid(scan->xs_cbi_scan, currItem->bucketid);
if (!u_sess->attr.attr_common.enable_indexscan_optimization) {
if (scan->xs_want_ext_oid && GPIScanCheckPartOid(scan->xs_gpi_scan, currItem->partitionOid)) {
GPISetCurrPartOid(scan->xs_gpi_scan, currItem->partitionOid);
}
if (scan->xs_want_bucketid && cbi_scan_need_change_bucket(scan->xs_cbi_scan, currItem->bucketid)) {
cbi_set_bucketid(scan->xs_cbi_scan, currItem->bucketid);
}
}
return true;
@@ -1058,9 +1068,13 @@ static bool _bt_readpage(IndexScanDesc scan, ScanDirection dir, OffsetNumber off
int indnatts;
Oid partOid = InvalidOid;
Oid heapOid = IndexScanGetPartHeapOid(scan);
Oid heapOid = InvalidOid;
int2 bucketid = InvalidBktId;
if (!u_sess->attr.attr_common.enable_indexscan_optimization) {
heapOid = IndexScanGetPartHeapOid(scan);
}
/* we must have the buffer pinned and locked */
Assert(BufferIsValid(so->currPos.buf));
@@ -1102,12 +1116,17 @@ static bool _bt_readpage(IndexScanDesc scan, ScanDirection dir, OffsetNumber off
itup = (IndexTuple) PageGetItem(page, iid);
if (_bt_checkkeys(scan, itup, indnatts, dir, &continuescan)) {
/* Get partition oid for global partition index. */
partOid = scan->xs_want_ext_oid ? index_getattr_tableoid(scan->indexRelation, itup) : heapOid;
/* Get bucketid for crossbucket index. */
bucketid = scan->xs_want_bucketid ? index_getattr_bucketid(scan->indexRelation, itup) : InvalidBktId;
/* tuple passes all scan key conditions, so remember it */
_bt_saveitem(so, itemIndex, offnum, itup, partOid, bucketid);
if (!u_sess->attr.attr_common.enable_indexscan_optimization) {
/* Get partition oid for global partition index. */
partOid = scan->xs_want_ext_oid ? index_getattr_tableoid(scan->indexRelation, itup) : heapOid;
/* Get bucketid for crossbucket index. */
bucketid = scan->xs_want_bucketid ? index_getattr_bucketid(scan->indexRelation, itup) : InvalidBktId;
/* tuple passes all scan key conditions, so remember it */
_bt_saveitem(so, itemIndex, offnum, itup, partOid, bucketid);
}
else {
_bt_saveitem(so, itemIndex, offnum, itup, InvalidOid, InvalidBktId);
}
itemIndex++;
}
/* When !continuescan, there can't be any more matches, so stop */
@@ -1184,11 +1203,17 @@ static bool _bt_readpage(IndexScanDesc scan, ScanDirection dir, OffsetNumber off
passes_quals = _bt_checkkeys(scan, itup, indnatts, dir,
&continuescan);
if (passes_quals && tuple_alive) {
partOid = scan->xs_want_ext_oid ? index_getattr_tableoid(scan->indexRelation, itup) : heapOid;
bucketid = scan->xs_want_bucketid ? index_getattr_bucketid(scan->indexRelation, itup) : InvalidBktId;
/* tuple passes all scan key conditions, so remember it */
itemIndex--;
_bt_saveitem(so, itemIndex, offnum, itup, partOid, bucketid);
if (!u_sess->attr.attr_common.enable_indexscan_optimization) {
partOid = scan->xs_want_ext_oid ? index_getattr_tableoid(scan->indexRelation, itup) : heapOid;
bucketid = scan->xs_want_bucketid ? index_getattr_bucketid(scan->indexRelation, itup) : InvalidBktId;
/* tuple passes all scan key conditions, so remember it */
itemIndex--;
_bt_saveitem(so, itemIndex, offnum, itup, partOid, bucketid);
} else {
/* tuple passes all scan key conditions, so remember it */
itemIndex--;
_bt_saveitem(so, itemIndex, offnum, itup, InvalidOid, InvalidBktId);
}
}
if (!continuescan) {
/* there can't be any more matches, so stop */
@@ -1628,12 +1653,13 @@ static bool _bt_endpoint(IndexScanDesc scan, ScanDirection dir)
if (scan->xs_want_itup)
scan->xs_itup = (IndexTuple)(so->currTuples + currItem->tupleOffset);
if (scan->xs_want_ext_oid && GPIScanCheckPartOid(scan->xs_gpi_scan, currItem->partitionOid)) {
GPISetCurrPartOid(scan->xs_gpi_scan, currItem->partitionOid);
}
if (scan->xs_want_bucketid && cbi_scan_need_change_bucket(scan->xs_cbi_scan, currItem->bucketid)) {
cbi_set_bucketid(scan->xs_cbi_scan, currItem->bucketid);
if (!u_sess->attr.attr_common.enable_indexscan_optimization) {
if (scan->xs_want_ext_oid && GPIScanCheckPartOid(scan->xs_gpi_scan, currItem->partitionOid)) {
GPISetCurrPartOid(scan->xs_gpi_scan, currItem->partitionOid);
}
if (scan->xs_want_bucketid && cbi_scan_need_change_bucket(scan->xs_cbi_scan, currItem->bucketid)) {
cbi_set_bucketid(scan->xs_cbi_scan, currItem->bucketid);
}
}
return true;

View File

@@ -1335,8 +1335,43 @@ bool _bt_checkkeys(IndexScanDesc scan, IndexTuple tuple, int tupnatts,
*/
return false;
}
if (!u_sess->attr.attr_common.enable_indexscan_optimization)
test = FunctionCall2Coll(&key->sk_func, key->sk_collation, datum, key->sk_argument);
else {
switch (key->sk_func.fn_oid) {
case 474: // F_INT84EQ
if ((int64)datum != (int64)key->sk_argument) {
test = 0;
break;
} else {
continue;
}
case 65: // F_INT4EQ
if ((int32)datum != (int32)key->sk_argument) {
test = 0;
break;
} else {
continue;
}
case 147: // F_INT4GT
if ((int32)datum <= (int32)key->sk_argument) {
test = 0;
break;
} else {
continue;
}
case 66: // F_INT4LT
if ((int32)datum >= (int32)key->sk_argument) {
test = 0;
break;
} else {
continue;
}
default:
test = FunctionCall2Coll(&key->sk_func, key->sk_collation, datum, key->sk_argument);
}
}
test = FunctionCall2Coll(&key->sk_func, key->sk_collation, datum, key->sk_argument);
if (!DatumGetBool(test)) {
/*
* Tuple fails this qual. If it's a required qual for the current
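
The switch above inlines the common integer quals (the numeric OIDs map to the pg_proc builtins named in the comments). Its control flow mirrors the _bt_compare change: a key that passes continues straight to the next scan key without building a result Datum, while a key that fails sets test = 0 (false) and breaks, so execution falls into the generic "tuple fails this qual" handling. Reduced to one case (illustrative):

    switch (key->sk_func.fn_oid) {
        case 65: /* F_INT4EQ */
            if ((int32)datum == (int32)key->sk_argument)
                continue;        /* qual passes: next scan key */
            test = 0;            /* qual fails: shared failure handling */
            break;
        default:                 /* anything else: regular fmgr call */
            test = FunctionCall2Coll(&key->sk_func, key->sk_collation,
                                     datum, key->sk_argument);
    }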

View File

@@ -89,8 +89,10 @@ uint32 BufTableHashCode(BufferTag *tagPtr)
int BufTableLookup(BufferTag *tag, uint32 hashcode)
{
BufferLookupEnt *result = NULL;
result = (BufferLookupEnt *)buf_hash_operate<HASH_FIND>(t_thrd.storage_cxt.SharedBufHash, tag, hashcode, NULL);
if (!u_sess->attr.attr_common.enable_indexscan_optimization)
result = (BufferLookupEnt *)buf_hash_operate<HASH_FIND>(t_thrd.storage_cxt.SharedBufHash, tag, hashcode, NULL);
else
result = (BufferLookupEnt *)hash_search_with_hash_value(t_thrd.storage_cxt.SharedBufHash, (void*) tag, hashcode, HASH_FIND, NULL);
if (SECUREC_UNLIKELY(result == NULL)) {
return -1;

View File

@@ -794,7 +794,9 @@ static void LWThreadSuicide(PGPROC *proc, int extraWaits, LWLock *lock, LWLockMo
while (extraWaits-- > 0) {
PGSemaphoreUnlock(&proc->sem);
}
instr_stmt_report_lock(LWLOCK_WAIT_END);
if (!u_sess->attr.attr_common.enable_indexscan_optimization)
instr_stmt_report_lock(LWLOCK_WAIT_END);
LWLockReportWaitFailed(lock);
ereport(FATAL, (errmsg("force thread %lu to exit because of lwlock deadlock", proc->pid),
errdetail("Lock Info: (%s), mode %d", T_NAME(lock), mode)));
@@ -1277,7 +1279,8 @@ bool LWLockAcquire(LWLock *lock, LWLockMode mode, bool need_update_lockid)
ereport(ERROR, (errcode(ERRCODE_LOCK_NOT_AVAILABLE), errmsg("too many LWLocks taken")));
}
remember_lwlock_acquire(lock);
if (!u_sess->attr.attr_common.enable_indexscan_optimization)
remember_lwlock_acquire(lock);
/*
* Lock out cancel/die interrupts until we exit the code section protected
@@ -1345,8 +1348,7 @@ bool LWLockAcquire(LWLock *lock, LWLockMode mode, bool need_update_lockid)
}
break;
}
if (need_update_lockid &&
if (!u_sess->attr.attr_common.enable_indexscan_optimization && need_update_lockid &&
get_dirty_page_num() >= g_instance.ckpt_cxt_ctl->dirty_page_queue_size * NEED_UPDATE_LOCKID_QUEUE_SLOT) {
update_wait_lockid(lock);
}
@@ -1410,7 +1412,8 @@ bool LWLockAcquire(LWLock *lock, LWLockMode mode, bool need_update_lockid)
TRACE_POSTGRESQL_LWLOCK_ACQUIRE(T_NAME(lock), mode);
}
forget_lwlock_acquire();
if (!u_sess->attr.attr_common.enable_indexscan_optimization)
forget_lwlock_acquire();
/* Add lock to list of locks held by this backend */
t_thrd.storage_cxt.held_lwlocks[t_thrd.storage_cxt.num_held_lwlocks].lock = lock;
@@ -1527,7 +1530,8 @@ bool LWLockAcquireOrWait(LWLock *lock, LWLockMode mode)
#ifdef LWLOCK_STATS
lwstats->block_count++;
#endif
remember_lwlock_acquire(lock);
if (!u_sess->attr.attr_common.enable_indexscan_optimization)
remember_lwlock_acquire(lock);
for (;;) {
/* "false" means cannot accept cancel/die interrupt here. */
@@ -1540,8 +1544,8 @@ bool LWLockAcquireOrWait(LWLock *lock, LWLockMode mode)
}
extraWaits++;
}
forget_lwlock_acquire();
if (!u_sess->attr.attr_common.enable_indexscan_optimization)
forget_lwlock_acquire();
#ifdef LOCK_DEBUG
{

View File

@@ -107,8 +107,6 @@ typedef struct IndexScanDescData {
Relation indexRelation; /* index relation descriptor */
bool isUpsert;
GPIScanDesc xs_gpi_scan; /* global partition index scan use information */
CBIScanDesc xs_cbi_scan; /* global bucket index scan use information */
Snapshot xs_snapshot; /* snapshot to see */
int numberOfKeys; /* number of index qualifier conditions */
int numberOfOrderBys; /* number of ordering operators */
@@ -147,6 +145,8 @@ typedef struct IndexScanDescData {
/* put decompressed heap tuple data into xs_ctbuf_hdr be careful! when malloc memory should give extra mem for
*xs_ctbuf_hdr. t_bits which is varlength arr
*/
GPIScanDesc xs_gpi_scan; /* global partition index scan use information */
CBIScanDesc xs_cbi_scan; /* global bucket index scan use information */
HeapTupleHeaderData xs_ctbuf_hdr;
/* DO NOT add any other members here. xs_ctbuf_hdr must be the last one. */
} IndexScanDescData;
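
Moving xs_gpi_scan and xs_cbi_scan from the head of IndexScanDescData to just before the trailing tuple header takes them out of the struct's leading bytes, presumably so the fields every scan touches per tuple (snapshot, key counts, buffer state) pack into the first cache lines while the global-partition and crossbucket descriptors, which the fast path never consults, sit in the cold tail. The documented invariant is preserved: xs_ctbuf_hdr stays the last member, since callers over-allocate the struct to give its variable-length t_bits area room.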

View File

@@ -212,6 +212,7 @@ typedef struct knl_session_attr_common {
char* router_att;
bool enable_router;
int backend_version;
bool enable_indexscan_optimization;
#ifdef ENABLE_MULTIPLE_NODES
bool enable_gpc_grayrelease_mode;
#endif