Forked from openGauss-Ecosystem/openGauss-server

!2147 TupleTableSlots: optimize related function calls
Merge pull request !2147 from ljy/perf_slot_rebase

Commit d82f611310
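The hunks below make two mechanical changes across the executor. First, the individual TupleTableSlot status booleans (tts_isempty, tts_shouldFree, tts_shouldFreeMin, tts_shouldFreeRow, tts_slow) are folded into a single tts_flags bit field that is read through TTS_* macros. Second, the TableAmType enum that used to be passed to every slot constructor is replaced by a cached const TableAmRoutine* (the new tts_tam_ops field), so table-AM dispatch no longer has to index the global g_tableam_routines[] array on every call. For orientation, the macros used throughout the diff presumably look roughly like the following sketch, inferred from the call sites; the exact bit values and the header they live in are not shown in this commit:

/* Sketch only -- inferred from usage in this diff, not the actual header. */
#define TTS_FLAG_EMPTY          (1 << 1)
#define TTS_FLAG_SHOULDFREE     (1 << 2)
#define TTS_FLAG_SHOULDFREEMIN  (1 << 3)
#define TTS_FLAG_SHOULDFREE_ROW (1 << 4)
#define TTS_FLAG_SLOW           (1 << 5)

#define TTS_EMPTY(slot)          (((slot)->tts_flags & TTS_FLAG_EMPTY) != 0)
#define TTS_SHOULDFREE(slot)     (((slot)->tts_flags & TTS_FLAG_SHOULDFREE) != 0)
#define TTS_SHOULDFREEMIN(slot)  (((slot)->tts_flags & TTS_FLAG_SHOULDFREEMIN) != 0)
#define TTS_SHOULDFREE_ROW(slot) (((slot)->tts_flags & TTS_FLAG_SHOULDFREE_ROW) != 0)
#define TTS_SLOW(slot)           (((slot)->tts_flags & TTS_FLAG_SLOW) != 0)

/* Table-AM identity checks against the cached routine pointer. */
#define TTS_TABLEAM_IS_HEAP(slot)   ((slot)->tts_tam_ops == TableAmHeap)
#define TTS_TABLEAM_IS_USTORE(slot) ((slot)->tts_tam_ops == TableAmUstore)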
@@ -745,8 +745,8 @@ static void gcBeginForeignScan(ForeignScanState* node, int eflags)
         fsstate->resultSlot->tts_isnull[i] = true;
     }
 
-    fsstate->resultSlot->tts_isempty = false;
-    fsstate->scanSlot->tts_isempty = false;
+    fsstate->resultSlot->tts_flags &= ~TTS_FLAG_EMPTY;
+    fsstate->scanSlot->tts_flags &= ~TTS_FLAG_EMPTY;
 
     fsstate->attinmeta = TupleDescGetAttInMetadata(fsstate->tupdesc);
 
@@ -917,7 +917,7 @@ static void postgresConstructResultSlotWithArray(ForeignScanState* node)
     }
 
     resultSlot->tts_nvalid = resultDesc->natts;
-    resultSlot->tts_isempty = false;
+    resultSlot->tts_flags &= ~TTS_FLAG_EMPTY;
 }
 
 static void postgresMapResultFromScanSlot(ForeignScanState* node)
@@ -956,7 +956,7 @@ static TupleTableSlot* gcIterateNormalForeignScan(ForeignScanState* node)
 
     /* reset tupleslot on the begin */
     (void)ExecClearTuple(fsstate->resultSlot);
-    fsstate->resultSlot->tts_isempty = false;
+    fsstate->resultSlot->tts_flags &= ~TTS_FLAG_EMPTY;
 
     TupleTableSlot* slot = node->ss.ss_ScanTupleSlot;
 
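The foreign-scan hunks above all follow one pattern: the slot is cleared (which sets TTS_FLAG_EMPTY), its tts_values/tts_isnull arrays are filled by hand, and the empty bit is then cleared so the virtual tuple becomes visible, replacing the old direct writes to tts_isempty. A condensed sketch of that sequence; the helper name is hypothetical and only for illustration:

/* Hypothetical helper, not part of the commit: prepare a slot for a hand-built virtual tuple. */
static void ResetVirtualSlot(TupleTableSlot* slot, int natts)
{
    (void)ExecClearTuple(slot);          /* drops old contents and sets TTS_FLAG_EMPTY */
    for (int i = 0; i < natts; i++) {
        slot->tts_values[i] = (Datum)0;
        slot->tts_isnull[i] = true;
    }
    slot->tts_nvalid = natts;
    slot->tts_flags &= ~TTS_FLAG_EMPTY;  /* equivalent of the old tts_isempty = false */
}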
@ -1327,7 +1327,7 @@ static TupleTableSlot* HdfsIterateForeignScan(ForeignScanState* scanState)
|
|||
* @hdfs
|
||||
* Optimize foreign scan by using informational constraint.
|
||||
*/
|
||||
if (((ForeignScan*)scanState->ss.ps.plan)->scan.predicate_pushdown_optimized && false == tupleSlot->tts_isempty) {
|
||||
if (((ForeignScan*)scanState->ss.ps.plan)->scan.predicate_pushdown_optimized && !TTS_EMPTY(tupleSlot)) {
|
||||
/*
|
||||
* If we find a suitable tuple, set is_scan_end value is true.
|
||||
* It means that we do not find suitable tuple in the next iteration,
|
||||
|
@ -1767,7 +1767,7 @@ int HdfsAcquireSampleRows(Relation relation, int logLevel, HeapTuple* sampleRows
|
|||
(void)MemoryContextSwitchTo(oldContext);
|
||||
|
||||
/* if there are no more records to read, break */
|
||||
if (scanTupleSlot->tts_isempty) {
|
||||
if (TTS_EMPTY(scanTupleSlot)) {
|
||||
break;
|
||||
}
|
||||
|
||||
|
|
|
@ -3435,7 +3435,7 @@ double IndexBuildUHeapScan(Relation heapRelation, Relation indexRelation, IndexI
|
|||
*/
|
||||
estate = CreateExecutorState();
|
||||
econtext = GetPerTupleExprContext(estate);
|
||||
slot = MakeSingleTupleTableSlot(RelationGetDescr(heapRelation), false, TAM_USTORE);
|
||||
slot = MakeSingleTupleTableSlot(RelationGetDescr(heapRelation), false, TableAmUstore);
|
||||
|
||||
/* Arrange for econtext's scan tuple to be the tuple under test */
|
||||
econtext->ecxt_scantuple = slot;
|
||||
|
@ -5424,7 +5424,7 @@ void ScanHeapInsertCBI(Relation parentRel, Relation heapRel, Relation idxRel, Oi
|
|||
tupleDesc = heapRel->rd_att;
|
||||
estate = CreateExecutorState();
|
||||
econtext = GetPerTupleExprContext(estate);
|
||||
slot = MakeSingleTupleTableSlot(RelationGetDescr(parentRel), false, parentRel->rd_tam_type);
|
||||
slot = MakeSingleTupleTableSlot(RelationGetDescr(parentRel), false, GetTableAmRoutine(parentRel->rd_tam_type));
|
||||
econtext->ecxt_scantuple = slot;
|
||||
/* Set up execution state for predicate, if any. */
|
||||
predicate = (List*)ExecPrepareQual(idxInfo->ii_Predicate, estate);
|
||||
|
@ -5657,7 +5657,7 @@ void ScanPartitionInsertIndex(Relation partTableRel, Relation partRel, const Lis
|
|||
|
||||
if (PointerIsValid(indexRelList)) {
|
||||
estate = CreateExecutorState();
|
||||
slot = MakeSingleTupleTableSlot(RelationGetDescr(partTableRel), false, partTableRel->rd_tam_type);
|
||||
slot = MakeSingleTupleTableSlot(RelationGetDescr(partTableRel), false, GetTableAmRoutine(partTableRel->rd_tam_type));
|
||||
}
|
||||
|
||||
scan = scan_handler_tbl_beginscan(partRel, SnapshotNow, 0, NULL);
|
||||
|
@ -5877,7 +5877,7 @@ void ScanPartitionDeleteGPITuples(Relation partTableRel, Relation partRel, const
|
|||
|
||||
if (PointerIsValid(indexRelList)) {
|
||||
estate = CreateExecutorState();
|
||||
slot = MakeSingleTupleTableSlot(RelationGetDescr(partTableRel), false, partTableRel->rd_tam_type);
|
||||
slot = MakeSingleTupleTableSlot(RelationGetDescr(partTableRel), false, GetTableAmRoutine(partTableRel->rd_tam_type));
|
||||
}
|
||||
|
||||
scan = scan_handler_tbl_beginscan(partRel, SnapshotNow, 0, NULL);
|
||||
|
|
|
@ -2653,7 +2653,7 @@ retry:
|
|||
}
|
||||
|
||||
/* TO DO: Need to switch this to inplaceheapam_scan_analyze_next_block after we have tableam. */
|
||||
TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(onerel), false, onerel->rd_tam_type);
|
||||
TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(onerel), false, GetTableAmRoutine(onerel->rd_tam_type));
|
||||
maxoffset = UHeapPageGetMaxOffsetNumber(targpage);
|
||||
|
||||
/* Inner loop over all tuples on the selected page */
|
||||
|
@ -3920,7 +3920,7 @@ static int64 AcquireSampleDfsStoreRows(Relation onerel, int elevel, HeapTuple* r
|
|||
totalblocks = list_length(fileList);
|
||||
|
||||
/* create tuple slot for scanning */
|
||||
scanTupleSlot = MakeTupleTableSlot(true, tupdesc->tdTableAmType);
|
||||
scanTupleSlot = MakeTupleTableSlot(true, GetTableAmRoutine(tupdesc->tdTableAmType));
|
||||
scanTupleSlot->tts_tupleDescriptor = tupdesc;
|
||||
scanTupleSlot->tts_values = columnValues;
|
||||
scanTupleSlot->tts_isnull = columnNulls;
|
||||
|
@ -3987,12 +3987,12 @@ static int64 AcquireSampleDfsStoreRows(Relation onerel, int elevel, HeapTuple* r
|
|||
randomskip = (int)(skip_factor * anl_random_fract());
|
||||
do {
|
||||
dfs::reader::DFSGetNextTuple(scanState, scanTupleSlot);
|
||||
} while (randomskip-- > 0 && !scanTupleSlot->tts_isempty);
|
||||
} while (randomskip-- > 0 && !TTS_EMPTY(scanTupleSlot));
|
||||
|
||||
/*
|
||||
* if there are no more records to read, break.
|
||||
*/
|
||||
if (scanTupleSlot->tts_isempty) {
|
||||
if (TTS_EMPTY(scanTupleSlot)) {
|
||||
(void)ExecClearTuple(scanTupleSlot);
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -1689,7 +1689,7 @@ double CopyUHeapDataInternal(Relation oldHeap, Relation oldIndex, Relation newHe
|
|||
values = (Datum *)palloc(natts * sizeof(Datum));
|
||||
isnull = (bool *)palloc(natts * sizeof(bool));
|
||||
|
||||
slot = MakeSingleTupleTableSlot(oldTupDesc, false, oldTupDesc->tdTableAmType);
|
||||
slot = MakeSingleTupleTableSlot(oldTupDesc, false, GetTableAmRoutine(oldTupDesc->tdTableAmType));
|
||||
|
||||
/* Initialize the rewrite operation */
|
||||
rwstate = begin_heap_rewrite(oldHeap, newHeap, oldestXmin, freezeXid, useWal);
|
||||
|
|
|
@ -4025,7 +4025,7 @@ static uint64 CopyFrom(CopyState cstate)
|
|||
estate->es_range_table = cstate->range_table;
|
||||
|
||||
/* Set up a tuple slot too */
|
||||
myslot = ExecInitExtraTupleSlot(estate, cstate->rel->rd_tam_type);
|
||||
myslot = ExecInitExtraTupleSlot(estate, GetTableAmRoutine(cstate->rel->rd_tam_type));
|
||||
ExecSetSlotDescriptor(myslot, tupDesc);
|
||||
/* Triggers might need a slot as well */
|
||||
estate->es_trig_tuple_slot = ExecInitExtraTupleSlot(estate);
|
||||
|
@ -4739,7 +4739,7 @@ static uint64 CopyFrom(CopyState cstate)
|
|||
* Global Partition Index stores the partition's tableOid with the index
|
||||
* tuple which is extracted from the tuple of the slot. Make sure it is set.
|
||||
*/
|
||||
if (slot->tts_tupslotTableAm != TAM_USTORE) {
|
||||
if (!TTS_TABLEAM_IS_USTORE(slot)) {
|
||||
((HeapTuple)slot->tts_tuple)->t_tableOid = RelationGetRelid(targetRel);
|
||||
} else {
|
||||
((UHeapTuple)slot->tts_tuple)->table_oid = RelationGetRelid(targetRel);
|
||||
|
@ -5275,7 +5275,7 @@ void UHeapCopyFromInsertBatch(Relation rel, EState* estate, CommandId mycid, int
|
|||
* Global Partition Index stores the partition's tableOid with the index
|
||||
* tuple which is extracted from the tuple of the slot. Make sure it is set.
|
||||
*/
|
||||
if (myslot->tts_tupslotTableAm != TAM_USTORE) {
|
||||
if (!TTS_TABLEAM_IS_USTORE(myslot)) {
|
||||
((HeapTuple)myslot->tts_tuple)->t_tableOid = RelationGetRelid(rel);
|
||||
} else {
|
||||
((UHeapTuple)myslot->tts_tuple)->table_oid = RelationGetRelid(rel);
|
||||
|
|
|
@ -382,7 +382,7 @@ static void ExecHandleMatData(TupleTableSlot *slot, Relation matview, Oid mapid,
|
|||
HeapTuple tuple;
|
||||
Oid matid = RelationGetRelid(matview);
|
||||
|
||||
if (slot == NULL || slot->tts_isempty) {
|
||||
if (slot == NULL || TTS_EMPTY(slot)) {
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -442,7 +442,7 @@ static void ExecHandleIncData(TupleTableSlot *slot, Relation matview, Oid mapid,
|
|||
HeapTuple tuple;
|
||||
Oid mvid = RelationGetRelid(matview);
|
||||
|
||||
if (slot == NULL || slot->tts_isempty) {
|
||||
if (slot == NULL || TTS_EMPTY(slot)) {
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
|
@ -8378,8 +8378,8 @@ static void ATRewriteTableInternal(AlteredTableInfo* tab, Relation oldrel, Relat
|
|||
* tuples are the same, the tupDescs might not be (consider ADD COLUMN
|
||||
* without a default).
|
||||
*/
|
||||
oldslot = MakeSingleTupleTableSlot(oldTupDesc, false, oldrel->rd_tam_type);
|
||||
newslot = MakeSingleTupleTableSlot(newTupDesc, false, oldrel->rd_tam_type);
|
||||
oldslot = MakeSingleTupleTableSlot(oldTupDesc, false, GetTableAmRoutine(oldrel->rd_tam_type));
|
||||
newslot = MakeSingleTupleTableSlot(newTupDesc, false, GetTableAmRoutine(oldrel->rd_tam_type));
|
||||
|
||||
/* Preallocate values/isnull arrays */
|
||||
i = Max(newTupDesc->natts, oldTupDesc->natts);
|
||||
|
@ -8531,7 +8531,7 @@ static void ATRewriteTableInternal(AlteredTableInfo* tab, Relation oldrel, Relat
|
|||
* will not try to clear it after we reset the context. Note that we don't explicitly pfree its
|
||||
* tuple since the per-tuple memory context will be reset shortly.
|
||||
*/
|
||||
oldslot->tts_shouldFree = false;
|
||||
oldslot->tts_flags &= ~TTS_FLAG_SHOULDFREE;
|
||||
|
||||
UHeapTuple backUpTup = BackUpScanCuTup(((UHeapScanDesc) scan)->rs_cutup);
|
||||
ResetExprContext(econtext);
|
||||
|
@ -12036,7 +12036,7 @@ static void validateCheckConstraint(Relation rel, HeapTuple constrtup)
|
|||
List* exprstate = ExecPrepareExprList(make_ands_implicit(origexpr), estate);
|
||||
ExprContext* econtext = GetPerTupleExprContext(estate);
|
||||
TupleDesc tupdesc = RelationGetDescr(rel);
|
||||
TupleTableSlot* slot = MakeSingleTupleTableSlot(tupdesc, false, rel->rd_tam_type);
|
||||
TupleTableSlot* slot = MakeSingleTupleTableSlot(tupdesc, false, GetTableAmRoutine(rel->rd_tam_type));
|
||||
|
||||
econtext->ecxt_scantuple = slot;
|
||||
|
||||
|
@ -26387,7 +26387,7 @@ static void exec_only_test_dfs_table(AlteredTableInfo* tab)
|
|||
/*
|
||||
* Create tuple slot for scanning.
|
||||
*/
|
||||
scan_tuple_slot = MakeTupleTableSlot(true, tuple_desc->tdTableAmType);
|
||||
scan_tuple_slot = MakeTupleTableSlot(true, GetTableAmRoutine(tuple_desc->tdTableAmType));
|
||||
scan_tuple_slot->tts_tupleDescriptor = tuple_desc;
|
||||
scan_tuple_slot->tts_values = values;
|
||||
scan_tuple_slot->tts_isnull = nulls;
|
||||
|
@ -26395,7 +26395,7 @@ static void exec_only_test_dfs_table(AlteredTableInfo* tab)
|
|||
do {
|
||||
dfs::reader::DFSGetNextTuple(scan, scan_tuple_slot);
|
||||
|
||||
if (!scan_tuple_slot->tts_isempty) {
|
||||
if (!TTS_EMPTY(scan_tuple_slot)) {
|
||||
|
||||
tuple = heap_form_tuple(tuple_desc, values, nulls);
|
||||
foreach (lc, not_null_attrs) {
|
||||
|
|
|
@ -2681,7 +2681,7 @@ static HeapTuple GetTupleForTrigger(EState* estate, EPQState* epqstate, ResultRe
|
|||
}
|
||||
|
||||
if (RelationIsUstoreFormat(relation)) {
|
||||
TupleTableSlot *slot = MakeSingleTupleTableSlot(relation->rd_att, false, TAM_USTORE);
|
||||
TupleTableSlot *slot = MakeSingleTupleTableSlot(relation->rd_att, false, TableAmUstore);
|
||||
UHeapTuple utuple;
|
||||
|
||||
UHeapTupleData uheaptupdata;
|
||||
|
|
|
@ -48,6 +48,7 @@
|
|||
#include "utils/syscache.h"
|
||||
#include "utils/typcache.h"
|
||||
#include "vecexecutor/vecexecutor.h"
|
||||
#include "access/tableam.h"
|
||||
|
||||
/* We only create optimized path only when the skew ratio is large than the limit. */
|
||||
#define SKEW_RATIO_LIMIT 3.0
|
||||
|
@ -1398,7 +1399,7 @@ bool SkewInfo::canValuePassQual(List* varList, List* valueList, Expr* expr)
|
|||
rte = planner_rt_fetch(rel->relid, m_root);
|
||||
heaprel = heap_open(rte->relid, NoLock);
|
||||
tupdesc = RelationGetDescr(heaprel);
|
||||
slot = MakeSingleTupleTableSlot(tupdesc, false, heaprel->rd_tam_type);
|
||||
slot = MakeSingleTupleTableSlot(tupdesc, false, GetTableAmRoutine(heaprel->rd_tam_type));
|
||||
slot->tts_nvalid = tupdesc->natts;
|
||||
heap_close(heaprel, NoLock);
|
||||
|
||||
|
|
|
@ -3190,7 +3190,7 @@ TupleTableSlot *EvalPlanQualUSlot(EPQState *epqstate, Relation relation, Index r
|
|||
if (*slot == NULL) {
|
||||
MemoryContext oldcontext = MemoryContextSwitchTo(epqstate->parentestate->es_query_cxt);
|
||||
|
||||
*slot = ExecAllocTableSlot(&epqstate->estate->es_tupleTable, TAM_USTORE);
|
||||
*slot = ExecAllocTableSlot(&epqstate->estate->es_tupleTable, TableAmUstore);
|
||||
if (relation)
|
||||
ExecSetSlotDescriptor(*slot, RelationGetDescr(relation));
|
||||
else
|
||||
|
@ -3199,7 +3199,7 @@ TupleTableSlot *EvalPlanQualUSlot(EPQState *epqstate, Relation relation, Index r
|
|||
MemoryContextSwitchTo(oldcontext);
|
||||
}
|
||||
|
||||
(*slot)->tts_tupslotTableAm = TAM_USTORE;
|
||||
(*slot)->tts_tam_ops = TableAmUstore;
|
||||
|
||||
return *slot;
|
||||
}
|
||||
|
|
|
@@ -112,18 +112,15 @@ static TupleDesc ExecTypeFromTLInternal(List* target_list, bool has_oid, bool sk
 * Basic routine to make an empty TupleTableSlot.
 * --------------------------------
 */
-TupleTableSlot* MakeTupleTableSlot(bool has_tuple_mcxt, TableAmType tupslotTableAm)
+TupleTableSlot* MakeTupleTableSlot(bool has_tuple_mcxt, const TableAmRoutine* tam_ops)
 {
     TupleTableSlot* slot = makeNode(TupleTableSlot);
-    Assert(tupslotTableAm == TAM_HEAP || tupslotTableAm == TAM_USTORE);
-
-    slot->tts_isempty = true;
-    slot->tts_shouldFree = false;
-    slot->tts_shouldFreeMin = false;
+    Assert(tam_ops == TableAmHeap || tam_ops == TableAmUstore);
+    slot->tts_flags |= TTS_FLAG_EMPTY;
     slot->tts_tuple = NULL;
     slot->tts_tupleDescriptor = NULL;
 #ifdef PGXC
     slot->tts_shouldFreeRow = false;
     slot->tts_dataRow = NULL;
     slot->tts_dataLen = -1;
     slot->tts_attinmeta = NULL;
@@ -141,7 +138,7 @@ TupleTableSlot* MakeTupleTableSlot(bool has_tuple_mcxt, TableAmType tupslotTable
         ALLOCSET_DEFAULT_INITSIZE,
         ALLOCSET_DEFAULT_MAXSIZE) : NULL;
 #endif
-    slot->tts_tupslotTableAm = tupslotTableAm;
+    slot->tts_tam_ops = tam_ops;
 
     return slot;
 }
@@ -152,7 +149,7 @@ TupleTableSlot* MakeTupleTableSlot(bool has_tuple_mcxt, TableAmType tupslotTable
 * Create a tuple table slot within a tuple table (which is just a List).
 * --------------------------------
 */
-TupleTableSlot* ExecAllocTableSlot(List** tuple_table, TableAmType tupslotTableAm)
+TupleTableSlot* ExecAllocTableSlot(List** tuple_table, const TableAmRoutine* tam_ops)
 {
     TupleTableSlot* slot;
 
@@ -160,7 +157,7 @@ TupleTableSlot* ExecAllocTableSlot(List** tuple_table, TableAmType tupslotTableA
 
     *tuple_table = lappend(*tuple_table, slot);
 
-    slot->tts_tupslotTableAm = tupslotTableAm;
+    slot->tts_tam_ops = tam_ops;
 
     return slot;
 }
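With the constructor signatures above, callers hand over the table-AM routine table itself rather than a TableAmType value. A minimal usage sketch, assuming the TableAmHeap/TableAmUstore pointers exported at the end of this diff, where rel stands in for whatever Relation the caller already holds:

/* Sketch: creating slots against the new signatures. */
TupleTableSlot* heap_slot = MakeTupleTableSlot(true, TableAmHeap);

/* Call sites that still carry the enum convert it once at the boundary: */
TupleTableSlot* scan_slot = MakeSingleTupleTableSlot(RelationGetDescr(rel), false,
                                                     GetTableAmRoutine(rel->rd_tam_type));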
@@ -208,7 +205,7 @@ void ExecResetTupleTable(List* tuple_table, /* tuple table */
         }
     }
 
-TupleTableSlot* ExecMakeTupleSlot(Tuple tuple, TableScanDesc tableScan, TupleTableSlot* slot, TableAmType tableAm)
+TupleTableSlot* ExecMakeTupleSlot(Tuple tuple, TableScanDesc tableScan, TupleTableSlot* slot, const TableAmRoutine* tam_ops)
 {
     if (unlikely(RELATION_CREATE_BUCKET(tableScan->rs_rd))) {
         tableScan = ((HBktTblScanDesc)tableScan)->currBktScan;
@@ -216,7 +213,7 @@ TupleTableSlot* ExecMakeTupleSlot(Tuple tuple, TableScanDesc tableScan, TupleTab
 
     if (tuple != NULL) {
         Assert(tableScan != NULL);
-        slot->tts_tupslotTableAm = tableAm;
+        slot->tts_tam_ops = tam_ops;
         return ExecStoreTuple(tuple, /* tuple to store */
             slot, /* slot to store in */
             tableScan->rs_cbuf, /* buffer associated with this tuple */
@@ -235,9 +232,9 @@ TupleTableSlot* ExecMakeTupleSlot(Tuple tuple, TableScanDesc tableScan, TupleTab
 * to use the given tuple descriptor.
 * --------------------------------
 */
-TupleTableSlot* MakeSingleTupleTableSlot(TupleDesc tup_desc, bool allocSlotCxt, TableAmType tupslotTableAm)
+TupleTableSlot* MakeSingleTupleTableSlot(TupleDesc tup_desc, bool allocSlotCxt, const TableAmRoutine* tam_ops)
 {
-    TupleTableSlot* slot = MakeTupleTableSlot(allocSlotCxt, tupslotTableAm);
+    TupleTableSlot* slot = MakeTupleTableSlot(allocSlotCxt, tam_ops);
     ExecSetSlotDescriptor(slot, tup_desc);
     return slot;
 }
@@ -377,9 +374,9 @@ TupleTableSlot* ExecStoreTuple(Tuple tuple, TupleTableSlot* slot, Buffer buffer,
     Assert(slot->tts_tupleDescriptor != NULL);
 
     HeapTuple htup = (HeapTuple)tuple;
-    if (slot->tts_tupslotTableAm == TAM_USTORE && htup->tupTableType == HEAP_TUPLE) {
+    if (TTS_TABLEAM_IS_USTORE(slot) && htup->tupTableType == HEAP_TUPLE) {
         tuple = (Tuple)HeapToUHeap(slot->tts_tupleDescriptor, (HeapTuple)tuple);
-    } else if (slot->tts_tupslotTableAm == TAM_HEAP && htup->tupTableType == UHEAP_TUPLE) {
+    } else if (TTS_TABLEAM_IS_HEAP(slot) && htup->tupTableType == UHEAP_TUPLE) {
         tuple = (Tuple)UHeapToHeap(slot->tts_tupleDescriptor, (UHeapTuple)tuple);
     }
 
@@ -431,7 +428,7 @@ TupleTableSlot* ExecClearTuple(TupleTableSlot* slot) /* return: slot passed slot
     /*
     * clear the physical tuple or minimal tuple if present via TableAm.
     */
-    if (slot->tts_shouldFree || slot->tts_shouldFreeMin) {
+    if (TTS_SHOULDFREE(slot) || TTS_SHOULDFREEMIN(slot)) {
         Assert(slot->tts_tupleDescriptor != NULL);
         tableam_tslot_clear(slot);
     }
@@ -441,14 +438,14 @@ TupleTableSlot* ExecClearTuple(TupleTableSlot* slot) /* return: slot passed slot
     */
     slot->tts_tuple = NULL;
     slot->tts_mintuple = NULL;
-    slot->tts_shouldFree = false;
-    slot->tts_shouldFreeMin = false;
+    slot->tts_flags &= ~TTS_FLAG_SHOULDFREE;
+    slot->tts_flags &= ~TTS_FLAG_SHOULDFREEMIN;
 
 #ifdef ENABLE_MULTIPLE_NODES
-    if (slot->tts_shouldFreeRow) {
+    if (TTS_SHOULDFREE_ROW(slot)) {
         pfree_ext(slot->tts_dataRow);
     }
-    slot->tts_shouldFreeRow = false;
+    slot->tts_flags = false;
     slot->tts_dataRow = NULL;
     slot->tts_dataLen = -1;
     slot->tts_xcnodeoid = 0;
@@ -465,7 +462,7 @@ TupleTableSlot* ExecClearTuple(TupleTableSlot* slot) /* return: slot passed slot
     /*
     * Mark it empty.
     */
-    slot->tts_isempty = true;
+    slot->tts_flags |= TTS_FLAG_EMPTY;
     slot->tts_nvalid = 0;
 
     // Row uncompression use slot->tts_per_tuple_mcxt in some case, So we need
@@ -495,15 +492,16 @@ TupleTableSlot* ExecStoreVirtualTuple(TupleTableSlot* slot)
     */
     Assert(slot != NULL);
     Assert(slot->tts_tupleDescriptor != NULL);
-    Assert(slot->tts_isempty);
+    Assert(TTS_EMPTY(slot));
 
-    slot->tts_isempty = false;
+    slot->tts_flags &= ~TTS_FLAG_EMPTY;
     slot->tts_nvalid = slot->tts_tupleDescriptor->natts;
 
-    if (slot->tts_tupslotTableAm != slot->tts_tupleDescriptor->tdTableAmType) {
+    TableAmType slot_tam = GetTableAmType(slot->tts_tam_ops);
+    if (slot_tam != slot->tts_tupleDescriptor->tdTableAmType) {
         // XXX: Should tts_tupleDescriptor be cloned before changing its contents
         // as some time it can be direct reference to the rd_att in RelationData.
-        slot->tts_tupleDescriptor->tdTableAmType = slot->tts_tupslotTableAm;
+        slot->tts_tupleDescriptor->tdTableAmType = slot_tam;
     }
 
     return slot;
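ExecStoreVirtualTuple above still has to reconcile the slot's routine pointer with the TableAmType enum kept in the tuple descriptor, which is what the new GetTableAmType() call does. Its implementation is not part of this hunk; one plausible shape, given the TableAmHeap/TableAmUstore pointers this commit introduces, is:

/* Sketch only -- a plausible GetTableAmType(); the committed version may differ. */
static inline TableAmType GetTableAmType(const TableAmRoutine* tam_ops)
{
    return (tam_ops == TableAmUstore) ? TAM_USTORE : TAM_HEAP;
}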
@@ -563,7 +561,7 @@ HeapTuple ExecCopySlotTuple(TupleTableSlot* slot)
     * sanity checks
     */
     Assert(slot != NULL);
-    Assert(!slot->tts_isempty);
+    Assert(!TTS_EMPTY(slot));
 
     return tableam_tslot_copy_heap_tuple(slot);
 }
@@ -581,7 +579,7 @@ MinimalTuple ExecCopySlotMinimalTuple(TupleTableSlot* slot, bool need_transform_
     * sanity checks
     */
     Assert(slot != NULL);
-    Assert(!slot->tts_isempty);
+    Assert(!TTS_EMPTY(slot));
 
     return tableam_tslot_copy_minimal_tuple(slot);
 }
@@ -607,7 +605,7 @@ HeapTuple ExecFetchSlotTuple(TupleTableSlot* slot)
     * sanity checks
     */
     Assert(slot != NULL);
-    Assert(!slot->tts_isempty);
+    Assert(!TTS_EMPTY(slot));
 
     return tableam_tslot_get_heap_tuple(slot);
 }
@@ -680,7 +678,7 @@ HeapTuple ExecMaterializeSlot(TupleTableSlot* slot)
     * sanity checks
     */
     Assert(slot != NULL);
-    Assert(!slot->tts_isempty);
+    Assert(!TTS_EMPTY(slot));
 
     return tableam_tslot_materialize(slot);
 }
@@ -728,27 +726,27 @@ TupleTableSlot* ExecCopySlot(TupleTableSlot* dst_slot, TupleTableSlot* src_slot)
 * ExecInitResultTupleSlot
 * ----------------
 */
-void ExecInitResultTupleSlot(EState* estate, PlanState* plan_state, TableAmType tam)
+void ExecInitResultTupleSlot(EState* estate, PlanState* plan_state, const TableAmRoutine* tam_ops)
 {
-    plan_state->ps_ResultTupleSlot = ExecAllocTableSlot(&estate->es_tupleTable, tam);
+    plan_state->ps_ResultTupleSlot = ExecAllocTableSlot(&estate->es_tupleTable, tam_ops);
 }
 
 /* ----------------
 * ExecInitScanTupleSlot
 * ----------------
 */
-void ExecInitScanTupleSlot(EState* estate, ScanState* scan_state, TableAmType tam)
+void ExecInitScanTupleSlot(EState* estate, ScanState* scan_state, const TableAmRoutine* tam_ops)
 {
-    scan_state->ss_ScanTupleSlot = ExecAllocTableSlot(&estate->es_tupleTable, tam);
+    scan_state->ss_ScanTupleSlot = ExecAllocTableSlot(&estate->es_tupleTable, tam_ops);
 }
 
 /* ----------------
 * ExecInitExtraTupleSlot
 * ----------------
 */
-TupleTableSlot* ExecInitExtraTupleSlot(EState* estate, TableAmType tam)
+TupleTableSlot* ExecInitExtraTupleSlot(EState* estate, const TableAmRoutine* tam_ops)
 {
-    return ExecAllocTableSlot(&estate->es_tupleTable, tam);
+    return ExecAllocTableSlot(&estate->es_tupleTable, tam_ops);
 }
 
 /* ----------------
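The three ExecInit*TupleSlot wrappers keep their shape; only the parameter type changes. Executor nodes that used to pass rel->rd_tam_type now wrap the enum once, as the scan-node hunks later in this diff show. Condensed, the call-site migration is:

/* before */
ExecInitScanTupleSlot(estate, &scanstate->ss, currentRelation->rd_tam_type);
/* after */
ExecInitScanTupleSlot(estate, &scanstate->ss, GetTableAmRoutine(currentRelation->rd_tam_type));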
@ -1102,12 +1100,12 @@ TupleTableSlot* ExecStoreDataRowTuple(char* msg, size_t len, Oid msgnode_oid, Tu
|
|||
/*
|
||||
* Free any old physical tuple belonging to the slot.
|
||||
*/
|
||||
if (slot->tts_shouldFree && (HeapTuple)slot->tts_tuple != NULL) {
|
||||
if (TTS_SHOULDFREE(slot) && (HeapTuple)slot->tts_tuple != NULL) {
|
||||
heap_freetuple((HeapTuple)slot->tts_tuple);
|
||||
slot->tts_tuple = NULL;
|
||||
slot->tts_shouldFree = false;
|
||||
slot->tts_flags &= ~TTS_FLAG_SHOULDFREE;
|
||||
}
|
||||
if (slot->tts_shouldFreeMin) {
|
||||
if (TTS_SHOULDFREEMIN(slot)) {
|
||||
heap_free_minimal_tuple(slot->tts_mintuple);
|
||||
}
|
||||
/*
|
||||
|
@ -1116,9 +1114,9 @@ TupleTableSlot* ExecStoreDataRowTuple(char* msg, size_t len, Oid msgnode_oid, Tu
|
|||
* to reset shouldFreeRow, since it will be overwritten just below.
|
||||
*/
|
||||
if (msg == slot->tts_dataRow) {
|
||||
slot->tts_shouldFreeRow = false;
|
||||
slot->tts_flags &= ~TTS_FLAG_SHOULDFREE_ROW;
|
||||
}
|
||||
if (slot->tts_shouldFreeRow) {
|
||||
if (TTS_SHOULDFREE_ROW(slot)) {
|
||||
pfree_ext(slot->tts_dataRow);
|
||||
}
|
||||
ResetSlotPerTupleContext(slot);
|
||||
|
@ -1134,10 +1132,13 @@ TupleTableSlot* ExecStoreDataRowTuple(char* msg, size_t len, Oid msgnode_oid, Tu
|
|||
/*
|
||||
* Store the new tuple into the specified slot.
|
||||
*/
|
||||
slot->tts_isempty = false;
|
||||
slot->tts_shouldFree = false;
|
||||
slot->tts_shouldFreeMin = false;
|
||||
slot->tts_shouldFreeRow = should_free;
|
||||
slot->tts_flags &= ~TTS_FLAG_EMPTY;
|
||||
slot->tts_flags &= ~TTS_FLAG_SHOULDFREE;
|
||||
slot->tts_flags &= ~TTS_FLAG_SHOULDFREEMIN;
|
||||
if(should_free)
|
||||
slot->tts_flags |= TTS_FLAG_SHOULDFREE_ROW;
|
||||
else
|
||||
slot->tts_flags &= ~TTS_FLAG_SHOULDFREE_ROW;
|
||||
slot->tts_tuple = NULL;
|
||||
slot->tts_mintuple = NULL;
|
||||
slot->tts_dataRow = msg;
|
||||
|
|
|
@ -1881,7 +1881,7 @@ bool check_violation(Relation heap, Relation index, IndexInfo *indexInfo, ItemPo
|
|||
* to this slot. Be sure to save and restore caller's value for
|
||||
* scantuple.
|
||||
*/
|
||||
existing_slot = MakeSingleTupleTableSlot(RelationGetDescr(heap), false, heap->rd_tam_type);
|
||||
existing_slot = MakeSingleTupleTableSlot(RelationGetDescr(heap), false, GetTableAmRoutine(heap->rd_tam_type));
|
||||
econtext = GetPerTupleExprContext(estate);
|
||||
save_scantuple = econtext->ecxt_scantuple;
|
||||
econtext->ecxt_scantuple = existing_slot;
|
||||
|
|
|
@ -858,7 +858,7 @@ static void prepare_projection_slot(AggState* aggstate, TupleTableSlot* slot, in
|
|||
|
||||
aggstate->grouped_cols = grouped_cols;
|
||||
|
||||
if (slot->tts_isempty) {
|
||||
if (TTS_EMPTY(slot)) {
|
||||
/*
|
||||
* Force all values to be NULL if working on an empty input tuple
|
||||
* (i.e. an empty grouping set for which no input rows were
|
||||
|
|
|
@ -802,8 +802,8 @@ BitmapHeapScanState* ExecInitBitmapHeapScan(BitmapHeapScan* node, EState* estate
|
|||
/*
|
||||
* tuple table initialization
|
||||
*/
|
||||
ExecInitResultTupleSlot(estate, &scanstate->ss.ps, currentRelation->rd_tam_type);
|
||||
ExecInitScanTupleSlot(estate, &scanstate->ss, currentRelation->rd_tam_type);
|
||||
ExecInitResultTupleSlot(estate, &scanstate->ss.ps, GetTableAmRoutine(currentRelation->rd_tam_type));
|
||||
ExecInitScanTupleSlot(estate, &scanstate->ss, GetTableAmRoutine(currentRelation->rd_tam_type));
|
||||
|
||||
InitBitmapHeapScanNextMtd(scanstate);
|
||||
|
||||
|
|
|
@ -139,7 +139,7 @@ static TupleTableSlot* IndexOnlyNext(IndexOnlyScanState* node)
|
|||
slot = node->ss.ss_ScanTupleSlot;
|
||||
isUHeap = RelationIsUstoreFormat(node->ss.ss_currentRelation);
|
||||
tmpslot = MakeSingleTupleTableSlot(RelationGetDescr(scandesc->heapRelation),
|
||||
false, scandesc->heapRelation->rd_tam_type);
|
||||
false, GetTableAmRoutine(scandesc->heapRelation->rd_tam_type));
|
||||
|
||||
/*
|
||||
* OK, now that we have what we need, fetch the next tuple.
|
||||
|
@ -584,8 +584,8 @@ IndexOnlyScanState* ExecInitIndexOnlyScan(IndexOnlyScan* node, EState* estate, i
|
|||
/*
|
||||
* tuple table initialization
|
||||
*/
|
||||
ExecInitResultTupleSlot(estate, &indexstate->ss.ps, currentRelation->rd_tam_type);
|
||||
ExecInitScanTupleSlot(estate, &indexstate->ss, currentRelation->rd_tam_type);
|
||||
ExecInitResultTupleSlot(estate, &indexstate->ss.ps, GetTableAmRoutine(currentRelation->rd_tam_type));
|
||||
ExecInitScanTupleSlot(estate, &indexstate->ss, GetTableAmRoutine(currentRelation->rd_tam_type));
|
||||
|
||||
/*
|
||||
* Build the scan tuple type using the indextlist generated by the
|
||||
|
|
|
@ -683,8 +683,8 @@ IndexScanState* ExecInitIndexScan(IndexScan* node, EState* estate, int eflags)
|
|||
/*
|
||||
* tuple table initialization
|
||||
*/
|
||||
ExecInitResultTupleSlot(estate, &index_state->ss.ps, current_relation->rd_tam_type);
|
||||
ExecInitScanTupleSlot(estate, &index_state->ss, current_relation->rd_tam_type);
|
||||
ExecInitResultTupleSlot(estate, &index_state->ss.ps, GetTableAmRoutine(current_relation->rd_tam_type));
|
||||
ExecInitScanTupleSlot(estate, &index_state->ss, GetTableAmRoutine(current_relation->rd_tam_type));
|
||||
|
||||
/*
|
||||
* get the scan type from the relation descriptor.
|
||||
|
|
|
@ -961,7 +961,7 @@ TupleTableSlot* ExecInsertT(ModifyTableState* state, TupleTableSlot* slot, Tuple
|
|||
} else
|
||||
#endif
|
||||
if (useHeapMultiInsert) {
|
||||
TupleTableSlot* tmp_slot = MakeSingleTupleTableSlot(slot->tts_tupleDescriptor, false, result_relation_desc->rd_tam_type);
|
||||
TupleTableSlot* tmp_slot = MakeSingleTupleTableSlot(slot->tts_tupleDescriptor, false, GetTableAmRoutine(result_relation_desc->rd_tam_type));
|
||||
|
||||
bool is_partition_rel = result_relation_desc->rd_rel->parttype == PARTTYPE_PARTITIONED_RELATION;
|
||||
Oid targetOid = InvalidOid;
|
||||
|
@ -1343,7 +1343,7 @@ TupleTableSlot* ExecDelete(ItemPointer tupleid, Oid deletePartitionOid, int2 buc
|
|||
return NULL;
|
||||
}
|
||||
|
||||
if (slot->tts_isempty) {
|
||||
if (TTS_EMPTY(slot)) {
|
||||
(void)ExecStoreAllNullTuple(slot);
|
||||
}
|
||||
} else {
|
||||
|
@ -1587,9 +1587,9 @@ end:;
|
|||
if (slot->tts_tupleDescriptor != RelationGetDescr(result_relation_desc)) {
|
||||
ExecSetSlotDescriptor(slot, RelationGetDescr(result_relation_desc));
|
||||
}
|
||||
slot->tts_tupslotTableAm = result_relation_desc->rd_tam_type;
|
||||
slot->tts_tam_ops = GetTableAmRoutine(result_relation_desc->rd_tam_type);
|
||||
if (oldtuple != NULL) {
|
||||
Assert(slot->tts_tupslotTableAm != TAM_USTORE);
|
||||
Assert(!TTS_TABLEAM_IS_USTORE(slot));
|
||||
del_tuple.t_data = oldtuple;
|
||||
del_tuple.t_len = HeapTupleHeaderGetDatumLength(oldtuple);
|
||||
ItemPointerSetInvalid(&(del_tuple.t_self));
|
||||
|
@ -3405,7 +3405,7 @@ ModifyTableState* ExecInitModifyTable(ModifyTable* node, EState* estate, int efl
|
|||
|
||||
/* initialize slot for the existing tuple */
|
||||
upsertState->us_existing =
|
||||
ExecInitExtraTupleSlot(mt_state->ps.state, result_rel_info->ri_RelationDesc->rd_tam_type);
|
||||
ExecInitExtraTupleSlot(mt_state->ps.state, GetTableAmRoutine(result_rel_info->ri_RelationDesc->rd_tam_type));
|
||||
ExecSetSlotDescriptor(upsertState->us_existing, result_rel_info->ri_RelationDesc->rd_att);
|
||||
|
||||
upsertState->us_excludedtlist = node->exclRelTlist;
|
||||
|
@ -3413,7 +3413,7 @@ ModifyTableState* ExecInitModifyTable(ModifyTable* node, EState* estate, int efl
|
|||
/* create target slot for UPDATE SET projection */
|
||||
tupDesc = ExecTypeFromTL((List*)node->updateTlist, result_rel_info->ri_RelationDesc->rd_rel->relhasoids);
|
||||
upsertState->us_updateproj =
|
||||
ExecInitExtraTupleSlot(mt_state->ps.state, result_rel_info->ri_RelationDesc->rd_tam_type);
|
||||
ExecInitExtraTupleSlot(mt_state->ps.state, GetTableAmRoutine(result_rel_info->ri_RelationDesc->rd_tam_type));
|
||||
ExecSetSlotDescriptor(upsertState->us_updateproj, tupDesc);
|
||||
|
||||
/* build UPDATE SET expression and projection state */
|
||||
|
@ -3543,7 +3543,7 @@ ModifyTableState* ExecInitModifyTable(ModifyTable* node, EState* estate, int efl
|
|||
|
||||
j = ExecInitJunkFilter(sub_plan->targetlist,
|
||||
result_rel_info->ri_RelationDesc->rd_att->tdhasoid,
|
||||
ExecInitExtraTupleSlot(estate, result_rel_info->ri_RelationDesc->rd_tam_type));
|
||||
ExecInitExtraTupleSlot(estate, GetTableAmRoutine(result_rel_info->ri_RelationDesc->rd_tam_type)));
|
||||
|
||||
if (operation == CMD_UPDATE || operation == CMD_DELETE || operation == CMD_MERGE) {
|
||||
/* For UPDATE/DELETE, find the appropriate junk attr now */
|
||||
|
@ -3631,7 +3631,7 @@ ModifyTableState* ExecInitModifyTable(ModifyTable* node, EState* estate, int efl
|
|||
*/
|
||||
if (estate->es_trig_tuple_slot == NULL) {
|
||||
result_rel_info = mt_state->resultRelInfo;
|
||||
estate->es_trig_tuple_slot = ExecInitExtraTupleSlot(estate, result_rel_info->ri_RelationDesc->rd_tam_type);
|
||||
estate->es_trig_tuple_slot = ExecInitExtraTupleSlot(estate, GetTableAmRoutine(result_rel_info->ri_RelationDesc->rd_tam_type));
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -2059,13 +2059,13 @@ static void ExecInitRecursiveResultTupleSlot(EState* estate, PlanState* planstat
|
|||
{
|
||||
TupleTableSlot* slot = makeNode(TupleTableSlot);
|
||||
|
||||
slot->tts_isempty = true;
|
||||
slot->tts_shouldFree = false;
|
||||
slot->tts_shouldFreeMin = false;
|
||||
slot->tts_flags |= TTS_FLAG_EMPTY;
|
||||
slot->tts_flags &= ~TTS_FLAG_SHOULDFREE;
|
||||
slot->tts_flags &= ~TTS_FLAG_SHOULDFREEMIN;
|
||||
slot->tts_tuple = NULL;
|
||||
slot->tts_tupleDescriptor = NULL;
|
||||
#ifdef PGXC
|
||||
slot->tts_shouldFreeRow = false;
|
||||
slot->tts_flags &= ~TTS_FLAG_SHOULDFREE_ROW;
|
||||
slot->tts_dataRow = NULL;
|
||||
slot->tts_dataLen = -1;
|
||||
slot->tts_attinmeta = NULL;
|
||||
|
|
|
@ -120,7 +120,10 @@ TupleTableSlot* HeapSeqSampleNext(SeqScanState* node)
|
|||
TupleTableSlot* slot = node->ss_ScanTupleSlot;
|
||||
node->ss_ScanTupleSlot->tts_tupleDescriptor->tdTableAmType = node->ss_currentRelation->rd_tam_type;
|
||||
HeapTuple tuple = SampleFetchNextTuple(node);
|
||||
return ExecMakeTupleSlot(tuple, GetTableScanDesc(node->ss_currentScanDesc, node->ss_currentRelation), slot, node->ss_currentRelation->rd_tam_type);
|
||||
return ExecMakeTupleSlot(tuple,
|
||||
GetTableScanDesc(node->ss_currentScanDesc, node->ss_currentRelation),
|
||||
slot,
|
||||
GetTableAmRoutine(node->ss_currentRelation->rd_tam_type));
|
||||
}
|
||||
|
||||
TupleTableSlot* UHeapSeqSampleNext(SeqScanState* node)
|
||||
|
@ -177,7 +180,7 @@ TupleTableSlot* HbktSeqSampleNext(SeqScanState* node)
|
|||
return ExecMakeTupleSlot(
|
||||
(Tuple) tuple, GetTableScanDesc(node->ss_currentScanDesc, node->ss_currentRelation),
|
||||
slot,
|
||||
node->ss_currentRelation->rd_tam_type);
|
||||
GetTableAmRoutine(node->ss_currentRelation->rd_tam_type));
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -209,7 +209,7 @@ void seq_scan_getnext_template(TableScanDesc scan, TupleTableSlot* slot, ScanDi
|
|||
if (tuple != NULL) {
|
||||
Assert(slot != NULL);
|
||||
Assert(slot->tts_tupleDescriptor != NULL);
|
||||
slot->tts_tupslotTableAm = type;
|
||||
slot->tts_tam_ops = GetTableAmRoutine(type);
|
||||
if (type == TAM_USTORE) {
|
||||
UHeapSlotStoreUHeapTuple((UHeapTuple)tuple, slot, false, false);
|
||||
} else {
|
||||
|
@ -306,7 +306,7 @@ static ScanBatchResult *SeqNextBatchMode(SeqScanState *node)
|
|||
scanDesc->rs_maxScanRows = node->scanBatchState->scanTupleSlotMaxNum;
|
||||
node->scanBatchState->scanfinished = tableam_scan_gettuplebatchmode(scanDesc, direction);
|
||||
|
||||
if (slot[0]->tts_tupslotTableAm == TAM_USTORE) {
|
||||
if (TTS_TABLEAM_IS_USTORE(slot[0])) {
|
||||
ExecStoreTupleBatchMode<TAM_USTORE>(scanDesc, slot);
|
||||
} else {
|
||||
ExecStoreTupleBatchMode<TAM_HEAP>(scanDesc, slot);
|
||||
|
@ -511,8 +511,8 @@ void InitScanRelation(SeqScanState* node, EState* estate, int eflags)
|
|||
/*
|
||||
* tuple table initialization
|
||||
*/
|
||||
ExecInitResultTupleSlot(estate, &node->ps, current_relation->rd_tam_type);
|
||||
ExecInitScanTupleSlot(estate, node, current_relation->rd_tam_type);
|
||||
ExecInitResultTupleSlot(estate, &node->ps, GetTableAmRoutine(current_relation->rd_tam_type));
|
||||
ExecInitScanTupleSlot(estate, node, GetTableAmRoutine(current_relation->rd_tam_type));
|
||||
|
||||
if (((Scan*)node->ps.plan)->tablesample && node->sampleScanInfo.tsm_state == NULL) {
|
||||
if (isUstoreRel) {
|
||||
|
@ -709,7 +709,7 @@ static SeqScanState *ExecInitSeqScanBatchMode(SeqScan *node, SeqScanState* scans
|
|||
(TupleTableSlot**)palloc(sizeof(TupleTableSlot*) * BatchMaxSize);
|
||||
for (i = 0; i < BatchMaxSize; i++) {
|
||||
TupleTableSlot* slot = ExecAllocTableSlot(&estate->es_tupleTable,
|
||||
scanstate->ss_currentRelation->rd_tam_type);
|
||||
GetTableAmRoutine(scanstate->ss_currentRelation->rd_tam_type));
|
||||
ExecSetSlotDescriptor(slot, scanstate->ss_ScanTupleSlot->tts_tupleDescriptor);
|
||||
scanBatchState->scanBatch.scanTupleSlotInBatch[i] = slot;
|
||||
}
|
||||
|
|
|
@ -335,7 +335,7 @@ StartWithOpState* ExecInitStartWithOp(StartWithOp* node, EState* estate, int efl
|
|||
false, false, u_sess->attr.attr_memory.work_mem);
|
||||
|
||||
/* create the working TupleTableslot */
|
||||
state->sw_workingSlot = ExecAllocTableSlot(&estate->es_tupleTable, TAM_HEAP);
|
||||
state->sw_workingSlot = ExecAllocTableSlot(&estate->es_tupleTable, TableAmHeap);
|
||||
ExecSetSlotDescriptor(state->sw_workingSlot, ExecTypeFromTL(targetlist, false));
|
||||
|
||||
int natts = list_length(node->plan.targetlist);
|
||||
|
|
|
@ -674,8 +674,8 @@ TidScanState* ExecInitTidScan(TidScan* node, EState* estate, int eflags)
|
|||
/*
|
||||
* tuple table initialization
|
||||
*/
|
||||
ExecInitResultTupleSlot(estate, &tidstate->ss.ps, current_relation->rd_tam_type);
|
||||
ExecInitScanTupleSlot(estate, &tidstate->ss, current_relation->rd_tam_type);
|
||||
ExecInitResultTupleSlot(estate, &tidstate->ss.ps, GetTableAmRoutine(current_relation->rd_tam_type));
|
||||
ExecInitScanTupleSlot(estate, &tidstate->ss, GetTableAmRoutine(current_relation->rd_tam_type));
|
||||
|
||||
/* deal with partitioned table branch */
|
||||
if (node->scan.isPartTbl) {
|
||||
|
|
|
@ -52,7 +52,7 @@ void DeleteFusion::InitLocals(ParamListInfo params)
|
|||
|
||||
m_local.m_reslot = MakeSingleTupleTableSlot(m_global->m_tupDesc);
|
||||
if (m_global->m_table_type == TAM_USTORE) {
|
||||
m_local.m_reslot->tts_tupslotTableAm = TAM_USTORE;
|
||||
m_local.m_reslot->tts_tam_ops = TableAmUstore;
|
||||
}
|
||||
m_local.m_values = (Datum*)palloc0(m_global->m_natts * sizeof(Datum));
|
||||
m_local.m_isnull = (bool*)palloc0(m_global->m_natts * sizeof(bool));
|
||||
|
|
|
@ -154,7 +154,7 @@ void IndexOnlyScanFusion::Init(long max_rows)
|
|||
*m_direction = NoMovementScanDirection;
|
||||
}
|
||||
|
||||
m_reslot = MakeSingleTupleTableSlot(m_tupDesc, false, m_tupDesc->tdTableAmType);
|
||||
m_reslot = MakeSingleTupleTableSlot(m_tupDesc, false, GetTableAmRoutine(m_tupDesc->tdTableAmType));
|
||||
ScanState* scanstate = makeNode(ScanState); // need release
|
||||
scanstate->ps.plan = (Plan *)m_node;
|
||||
|
||||
|
@ -222,7 +222,7 @@ TupleTableSlot *IndexOnlyScanFusion::getTupleSlotInternal()
|
|||
bool bucket_changed = false;
|
||||
TupleTableSlot* tmpreslot = NULL;
|
||||
tmpreslot = MakeSingleTupleTableSlot(RelationGetDescr(m_scandesc->heapRelation),
|
||||
false, m_scandesc->heapRelation->rd_tam_type);
|
||||
false, GetTableAmRoutine(m_scandesc->heapRelation->rd_tam_type));
|
||||
|
||||
while ((tid = scan_handler_idx_getnext_tid(m_scandesc, *m_direction, &bucket_changed)) != NULL) {
|
||||
HeapTuple tuple = NULL;
|
||||
|
|
|
@ -167,7 +167,7 @@ void IndexScanFusion::Init(long max_rows)
|
|||
}
|
||||
|
||||
m_epq_indexqual = m_node->indexqualorig;
|
||||
m_reslot = MakeSingleTupleTableSlot(m_tupDesc, false, m_rel->rd_tam_type);
|
||||
m_reslot = MakeSingleTupleTableSlot(m_tupDesc, false, GetTableAmRoutine(m_rel->rd_tam_type));
|
||||
}
|
||||
|
||||
HeapTuple IndexScanFusion::getTuple()
|
||||
|
|
|
@ -108,7 +108,7 @@ void InsertFusion::InitLocals(ParamListInfo params)
|
|||
m_c_local.m_estate->es_range_table = m_global->m_planstmt->rtable;
|
||||
m_local.m_reslot = MakeSingleTupleTableSlot(m_global->m_tupDesc);
|
||||
if (m_global->m_table_type == TAM_USTORE) {
|
||||
m_local.m_reslot->tts_tupslotTableAm = TAM_USTORE;
|
||||
m_local.m_reslot->tts_tam_ops = TableAmUstore;
|
||||
}
|
||||
m_local.m_values = (Datum*)palloc0(m_global->m_natts * sizeof(Datum));
|
||||
m_local.m_isnull = (bool*)palloc0(m_global->m_natts * sizeof(bool));
|
||||
|
|
|
@ -47,7 +47,7 @@ void SelectForUpdateFusion::InitLocals(ParamListInfo params)
|
|||
{
|
||||
m_local.m_reslot = MakeSingleTupleTableSlot(m_global->m_tupDesc);
|
||||
if (m_global->m_table_type == TAM_USTORE) {
|
||||
m_local.m_reslot->tts_tupslotTableAm = TAM_USTORE;
|
||||
m_local.m_reslot->tts_tam_ops = TableAmUstore;
|
||||
}
|
||||
m_c_local.m_estate = CreateExecutorState();
|
||||
m_c_local.m_estate->es_range_table = m_global->m_planstmt->rtable;
|
||||
|
|
|
@ -54,7 +54,7 @@ void SortFusion::InitLocals(ParamListInfo params)
|
|||
if (!IsGlobal())
|
||||
m_global->m_tupDesc->tdTableAmType = m_local.m_scan->m_tupDesc->tdTableAmType;
|
||||
|
||||
m_local.m_reslot = MakeSingleTupleTableSlot(m_global->m_tupDesc, false, m_local.m_scan->m_tupDesc->tdTableAmType);
|
||||
m_local.m_reslot = MakeSingleTupleTableSlot(m_global->m_tupDesc, false, GetTableAmRoutine(m_local.m_scan->m_tupDesc->tdTableAmType));
|
||||
m_local.m_values = (Datum*)palloc0(m_global->m_tupDesc->natts * sizeof(Datum));
|
||||
m_local.m_isnull = (bool*)palloc0(m_global->m_tupDesc->natts * sizeof(bool));
|
||||
}
|
||||
|
|
|
@ -207,7 +207,7 @@ void UpdateFusion::InitLocals(ParamListInfo params)
|
|||
|
||||
m_local.m_reslot = MakeSingleTupleTableSlot(m_global->m_tupDesc);
|
||||
if (m_global->m_table_type == TAM_USTORE) {
|
||||
m_local.m_reslot->tts_tupslotTableAm = TAM_USTORE;
|
||||
m_local.m_reslot->tts_tam_ops = TableAmUstore;
|
||||
}
|
||||
m_local.m_values = (Datum*)palloc0(m_global->m_natts * sizeof(Datum));
|
||||
m_local.m_isnull = (bool*)palloc0(m_global->m_natts * sizeof(bool));
|
||||
|
|
|
@ -659,7 +659,7 @@ hashFileSource::hashFileSource(VectorBatch* batch, MemoryContext context, int ce
|
|||
m_tupleSize = 100;
|
||||
m_tuple = (MinimalTuple)palloc(m_tupleSize);
|
||||
m_tuple->t_len = m_tupleSize;
|
||||
m_hashTupleSlot = MakeTupleTableSlot(true, tuple_desc->tdTableAmType);
|
||||
m_hashTupleSlot = MakeTupleTableSlot(true, GetTableAmRoutine(tuple_desc->tdTableAmType));
|
||||
ExecSetSlotDescriptor(m_hashTupleSlot, tuple_desc);
|
||||
}
|
||||
|
||||
|
@ -675,7 +675,7 @@ hashFileSource::hashFileSource(TupleTableSlot* hash_slot, int file_num)
|
|||
m_context = NULL;
|
||||
if (m_hashTupleSlot->tts_tupleDescriptor == NULL) {
|
||||
ExecSetSlotDescriptor(m_hashTupleSlot, hash_slot->tts_tupleDescriptor);
|
||||
m_hashTupleSlot->tts_tupslotTableAm = hash_slot->tts_tupleDescriptor->tdTableAmType;
|
||||
m_hashTupleSlot->tts_tam_ops = GetTableAmRoutine(hash_slot->tts_tupleDescriptor->tdTableAmType);
|
||||
}
|
||||
|
||||
m_cols = 0;
|
||||
|
|
|
@ -268,7 +268,7 @@ VecToRowState* ExecInitVecToRow(VecToRow* node, EState* estate, int eflags)
|
|||
state->tts = state->ps.ps_ResultTupleSlot;
|
||||
(void)ExecClearTuple(state->tts);
|
||||
state->tts->tts_nvalid = state->nattrs;
|
||||
state->tts->tts_isempty = false;
|
||||
state->tts->tts_flags &= ~TTS_FLAG_EMPTY;
|
||||
state->devectorizeFunRuntime = (DevectorizeFun*)palloc0(state->nattrs * sizeof(DevectorizeFun));
|
||||
for (int i = 0; i < state->nattrs; i++) {
|
||||
state->tts->tts_isnull[i] = false;
|
||||
|
|
|
@ -1155,7 +1155,7 @@ static void slot_deform_tuple(TupleTableSlot *slot, uint32 natts)
|
|||
} else {
|
||||
/* Restore state from previous execution */
|
||||
off = slot->tts_off;
|
||||
slow = slot->tts_slow;
|
||||
slow = TTS_SLOW(slot);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1194,7 +1194,10 @@ static void slot_deform_tuple(TupleTableSlot *slot, uint32 natts)
|
|||
*/
|
||||
slot->tts_nvalid = attnum;
|
||||
slot->tts_off = off;
|
||||
slot->tts_slow = slow;
|
||||
if (slow)
|
||||
slot->tts_flags |= TTS_FLAG_SLOW;
|
||||
else
|
||||
slot->tts_flags &= ~TTS_FLAG_SLOW;
|
||||
}
|
||||
|
||||
|
||||
|
@ -1438,7 +1441,7 @@ Datum heap_slot_getattr(TupleTableSlot *slot, int attnum, bool *isnull, bool nee
|
|||
/* sanity checks */
|
||||
Assert(slot != NULL);
|
||||
Assert(slot->tts_tupleDescriptor != NULL);
|
||||
Assert(slot->tts_tupslotTableAm == TAM_HEAP);
|
||||
Assert(TTS_TABLEAM_IS_HEAP(slot));
|
||||
|
||||
HeapTuple tuple = (HeapTuple)slot->tts_tuple;
|
||||
TupleDesc tupleDesc = slot->tts_tupleDescriptor;
|
||||
|
@ -1558,7 +1561,7 @@ Datum heap_slot_getattr(TupleTableSlot *slot, int attnum, bool *isnull, bool nee
|
|||
*/
|
||||
void heap_slot_getallattrs(TupleTableSlot *slot, bool need_transform_anyarray)
|
||||
{
|
||||
Assert(slot->tts_tupslotTableAm == TAM_HEAP);
|
||||
Assert(TTS_TABLEAM_IS_HEAP(slot));
|
||||
|
||||
int tdesc_natts = slot->tts_tupleDescriptor->natts;
|
||||
int attnum;
|
||||
|
@ -1669,7 +1672,7 @@ void heap_slot_formbatch(TupleTableSlot* slot, VectorBatch* batch, int cur_rows,
|
|||
*/
|
||||
void heap_slot_getsomeattrs(TupleTableSlot *slot, int attnum)
|
||||
{
|
||||
Assert(slot->tts_tupslotTableAm == TAM_HEAP);
|
||||
Assert(TTS_TABLEAM_IS_HEAP(slot));
|
||||
|
||||
/* Quick out if we have 'em all already */
|
||||
if (slot->tts_nvalid >= attnum) {
|
||||
|
@ -1713,7 +1716,7 @@ bool heap_slot_attisnull(TupleTableSlot *slot, int attnum)
|
|||
HeapTuple tuple = (HeapTuple)slot->tts_tuple;
|
||||
TupleDesc tupleDesc = slot->tts_tupleDescriptor;
|
||||
|
||||
Assert(slot->tts_tupslotTableAm == TAM_HEAP);
|
||||
Assert(TTS_TABLEAM_IS_HEAP(slot));
|
||||
|
||||
/*
|
||||
* system attributes are handled by heap_attisnull
|
||||
|
@ -2978,7 +2981,7 @@ static void slot_deform_cmprs_tuple(TupleTableSlot *slot, uint32 natts)
|
|||
slot->tts_nvalid = attnum;
|
||||
slot->tts_off = off;
|
||||
slot->tts_meta_off = cmprsOff;
|
||||
slot->tts_slow = true;
|
||||
slot->tts_flags |= TTS_FLAG_SLOW;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -2990,20 +2993,20 @@ void heap_slot_clear(TupleTableSlot *slot)
|
|||
* sanity checks
|
||||
*/
|
||||
Assert(slot != NULL);
|
||||
Assert(slot->tts_tupslotTableAm == TAM_HEAP);
|
||||
Assert(TTS_TABLEAM_IS_HEAP(slot));
|
||||
|
||||
/*
|
||||
* Free any old physical tuple belonging to the slot.
|
||||
*/
|
||||
if (slot->tts_shouldFree) {
|
||||
if (TTS_SHOULDFREE(slot)) {
|
||||
heap_freetuple((HeapTuple)slot->tts_tuple);
|
||||
slot->tts_tuple = NULL;
|
||||
slot->tts_shouldFree = false;
|
||||
slot->tts_flags &= ~TTS_FLAG_SHOULDFREE;
|
||||
}
|
||||
|
||||
if (slot->tts_shouldFreeMin) {
|
||||
if (TTS_SHOULDFREEMIN(slot)) {
|
||||
heap_free_minimal_tuple(slot->tts_mintuple);
|
||||
slot->tts_shouldFreeMin = false;
|
||||
slot->tts_flags &= ~TTS_FLAG_SHOULDFREEMIN;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -3019,14 +3022,14 @@ void heap_slot_materialize(TupleTableSlot *slot)
|
|||
* sanity checks
|
||||
*/
|
||||
Assert(slot != NULL);
|
||||
Assert(!slot->tts_isempty);
|
||||
Assert(!TTS_EMPTY(slot));
|
||||
Assert(slot->tts_tupleDescriptor != NULL);
|
||||
|
||||
/*
|
||||
* If we have a regular physical tuple, and it's locally palloc'd, we have
|
||||
* nothing to do.
|
||||
*/
|
||||
if (slot->tts_tuple && slot->tts_shouldFree && !HEAP_TUPLE_IS_COMPRESSED(((HeapTuple)slot->tts_tuple)->t_data))
|
||||
if (slot->tts_tuple && TTS_SHOULDFREE(slot) && !HEAP_TUPLE_IS_COMPRESSED(((HeapTuple)slot->tts_tuple)->t_data))
|
||||
return ;
|
||||
|
||||
/*
|
||||
|
@ -3038,7 +3041,7 @@ void heap_slot_materialize(TupleTableSlot *slot)
|
|||
*/
|
||||
MemoryContext old_context = MemoryContextSwitchTo(slot->tts_mcxt);
|
||||
slot->tts_tuple = heap_slot_copy_heap_tuple(slot);
|
||||
slot->tts_shouldFree = true;
|
||||
slot->tts_flags |= TTS_FLAG_SHOULDFREE;
|
||||
MemoryContextSwitchTo(old_context);
|
||||
|
||||
/*
|
||||
|
@ -3065,11 +3068,11 @@ void heap_slot_materialize(TupleTableSlot *slot)
|
|||
* storage, we must not pfree it now, since callers might have already
|
||||
* fetched datum pointers referencing it.)
|
||||
*/
|
||||
if (!slot->tts_shouldFreeMin) {
|
||||
if (!TTS_SHOULDFREEMIN(slot)) {
|
||||
slot->tts_mintuple = NULL;
|
||||
}
|
||||
#ifdef PGXC
|
||||
if (!slot->tts_shouldFreeRow) {
|
||||
if (!TTS_SHOULDFREE_ROW(slot)) {
|
||||
slot->tts_dataRow = NULL;
|
||||
slot->tts_dataLen = -1;
|
||||
}
|
||||
|
@ -3091,8 +3094,8 @@ MinimalTuple heap_slot_get_minimal_tuple(TupleTableSlot *slot) {
|
|||
* sanity checks
|
||||
*/
|
||||
Assert(slot != NULL);
|
||||
Assert(!slot->tts_isempty);
|
||||
Assert(slot->tts_tupslotTableAm == TAM_HEAP);
|
||||
Assert(!TTS_EMPTY(slot));
|
||||
Assert(TTS_TABLEAM_IS_HEAP(slot));
|
||||
|
||||
/*
|
||||
* If we have a minimal physical tuple (local or not) then just return it.
|
||||
|
@ -3109,7 +3112,7 @@ MinimalTuple heap_slot_get_minimal_tuple(TupleTableSlot *slot) {
|
|||
*/
|
||||
MemoryContext old_context = MemoryContextSwitchTo(slot->tts_mcxt);
|
||||
slot->tts_mintuple = heap_slot_copy_minimal_tuple(slot);
|
||||
slot->tts_shouldFreeMin = true;
|
||||
slot->tts_flags |= TTS_FLAG_SHOULDFREEMIN;
|
||||
MemoryContextSwitchTo(old_context);
|
||||
|
||||
/*
|
||||
|
@ -3138,9 +3141,9 @@ MinimalTuple heap_slot_copy_minimal_tuple(TupleTableSlot *slot)
|
|||
* sanity checks.
|
||||
*/
|
||||
Assert(slot != NULL);
|
||||
Assert(!slot->tts_isempty);
|
||||
Assert(!TTS_EMPTY(slot));
|
||||
Assert(slot->tts_tupleDescriptor != NULL);
|
||||
Assert(slot->tts_tupslotTableAm == TAM_HEAP);
|
||||
Assert(TTS_TABLEAM_IS_HEAP(slot));
|
||||
|
||||
/*
|
||||
* If we have a physical tuple then just copy it. Prefer to copy
|
||||
|
@ -3187,24 +3190,24 @@ void heap_slot_store_minimal_tuple(MinimalTuple mtup, TupleTableSlot *slot, bool
|
|||
Assert(mtup != NULL);
|
||||
Assert(slot != NULL);
|
||||
Assert(slot->tts_tupleDescriptor != NULL);
|
||||
Assert(slot->tts_tupslotTableAm == TAM_HEAP);
|
||||
Assert(TTS_TABLEAM_IS_HEAP(slot));
|
||||
|
||||
/*
|
||||
* Free any old physical tuple belonging to the slot.
|
||||
*/
|
||||
if (slot->tts_shouldFree && (HeapTuple)slot->tts_tuple != NULL) {
|
||||
if (TTS_SHOULDFREE(slot) && (HeapTuple)slot->tts_tuple != NULL) {
|
||||
heap_freetuple((HeapTuple)slot->tts_tuple);
|
||||
slot->tts_tuple = NULL;
|
||||
}
|
||||
if (slot->tts_shouldFreeMin) {
|
||||
if (TTS_SHOULDFREEMIN(slot)) {
|
||||
heap_free_minimal_tuple(slot->tts_mintuple);
|
||||
}
|
||||
|
||||
#ifdef PGXC
|
||||
if (slot->tts_shouldFreeRow) {
|
||||
if (TTS_SHOULDFREE_ROW(slot)) {
|
||||
pfree_ext(slot->tts_dataRow);
|
||||
}
|
||||
slot->tts_shouldFreeRow = false;
|
||||
slot->tts_flags &= ~TTS_FLAG_SHOULDFREE_ROW;
|
||||
slot->tts_dataRow = NULL;
|
||||
slot->tts_dataLen = -1;
|
||||
#endif
|
||||
|
@ -3220,9 +3223,12 @@ slot->tts_buffer = InvalidBuffer;
|
|||
/*
|
||||
* Store the new tuple into the specified slot.
|
||||
*/
|
||||
slot->tts_isempty = false;
|
||||
slot->tts_shouldFree = false;
|
||||
slot->tts_shouldFreeMin = shouldFree;
|
||||
slot->tts_flags &= ~TTS_FLAG_EMPTY;
|
||||
slot->tts_flags &= ~TTS_FLAG_SHOULDFREE;
|
||||
if (shouldFree)
|
||||
slot->tts_flags |= TTS_FLAG_SHOULDFREEMIN;
|
||||
else
|
||||
slot->tts_flags &= ~TTS_FLAG_SHOULDFREEMIN;
|
||||
slot->tts_tuple = &slot->tts_minhdr;
|
||||
slot->tts_mintuple = mtup;
|
||||
|
||||
|
@ -3249,7 +3255,7 @@ HeapTuple heap_slot_get_heap_tuple(TupleTableSlot* slot)
|
|||
* sanity checks
|
||||
*/
|
||||
Assert(slot != NULL);
|
||||
Assert(!slot->tts_isempty);
|
||||
Assert(!TTS_EMPTY(slot));
|
||||
Assert(slot->tts_tupleDescriptor != NULL);
|
||||
|
||||
/*
|
||||
|
@ -3283,7 +3289,7 @@ HeapTuple heap_slot_copy_heap_tuple(TupleTableSlot *slot)
|
|||
* sanity checks
|
||||
*/
|
||||
Assert(slot != NULL);
|
||||
Assert(!slot->tts_isempty);
|
||||
Assert(!TTS_EMPTY(slot));
|
||||
Assert(slot->tts_tupleDescriptor != NULL);
|
||||
|
||||
/*
|
||||
|
@ -3334,19 +3340,19 @@ void heap_slot_store_heap_tuple(HeapTuple tuple, TupleTableSlot* slot, Buffer bu
|
|||
/*
|
||||
* Free any old physical tuple belonging to the slot.
|
||||
*/
|
||||
if (slot->tts_shouldFree && (HeapTuple)slot->tts_tuple != NULL) {
|
||||
if (TTS_SHOULDFREE(slot) && (HeapTuple)slot->tts_tuple != NULL) {
|
||||
heap_freetuple((HeapTuple)slot->tts_tuple);
|
||||
slot->tts_tuple = NULL;
|
||||
}
|
||||
if (slot->tts_shouldFreeMin) {
|
||||
if (TTS_SHOULDFREEMIN(slot)) {
|
||||
heap_free_minimal_tuple(slot->tts_mintuple);
|
||||
}
|
||||
#ifdef ENABLE_MULTIPLE_NODES
|
||||
#ifdef PGXC
|
||||
if (slot->tts_shouldFreeRow) {
|
||||
if (TTS_SHOULDFREE_ROW(slot)) {
|
||||
pfree_ext(slot->tts_dataRow);
|
||||
}
|
||||
slot->tts_shouldFreeRow = false;
|
||||
slot->tts_flags &= ~TTS_FLAG_SHOULDFREE_ROW;
|
||||
slot->tts_dataRow = NULL;
|
||||
slot->tts_dataLen = -1;
|
||||
|
||||
|
@ -3365,9 +3371,12 @@ void heap_slot_store_heap_tuple(HeapTuple tuple, TupleTableSlot* slot, Buffer bu
|
|||
/*
|
||||
* Store the new tuple into the specified slot.
|
||||
*/
|
||||
slot->tts_isempty = false;
|
||||
slot->tts_shouldFree = should_free;
|
||||
slot->tts_shouldFreeMin = false;
|
||||
slot->tts_flags &= ~TTS_FLAG_EMPTY;
|
||||
if (should_free)
|
||||
slot->tts_flags |= TTS_FLAG_SHOULDFREE;
|
||||
else
|
||||
slot->tts_flags &= ~TTS_FLAG_SHOULDFREE;
|
||||
slot->tts_flags &= ~TTS_FLAG_SHOULDFREEMIN;
|
||||
slot->tts_tuple = tuple;
|
||||
slot->tts_mintuple = NULL;
|
||||
|
||||
|
|
|
@ -746,7 +746,7 @@ bool IndexGetnextSlot(IndexScanDesc scan, ScanDirection direction, TupleTableSlo
|
|||
ItemPointer tid;
|
||||
TupleTableSlot* tmpslot = NULL;
|
||||
tmpslot = MakeSingleTupleTableSlot(RelationGetDescr(scan->heapRelation),
|
||||
false, scan->heapRelation->rd_tam_type);
|
||||
false, GetTableAmRoutine(scan->heapRelation->rd_tam_type));
|
||||
for (;;) {
|
||||
/* IO collector and IO scheduler */
|
||||
#ifdef ENABLE_MULTIPLE_NODES
|
||||
|
|
|
@ -56,81 +56,6 @@
|
|||
* ------------------------------------------------------------------------
|
||||
*/
|
||||
|
||||
|
||||
const TableAmRoutine *GetTableAmRoutine(TableAmType type)
|
||||
{
|
||||
return g_tableam_routines[type];
|
||||
}
|
||||
|
||||
/*
|
||||
* Clears the contents of the table slot that contains heap table tuple data.
|
||||
*/
|
||||
void tableam_tslot_clear(TupleTableSlot *slot)
|
||||
{
|
||||
return g_tableam_routines[slot->tts_tupslotTableAm]->tslot_clear(slot);
|
||||
}
|
||||
|
||||
HeapTuple tableam_tslot_materialize(TupleTableSlot *slot)
|
||||
{
|
||||
return g_tableam_routines[slot->tts_tupslotTableAm]->tslot_materialize(slot);
|
||||
}
|
||||
|
||||
MinimalTuple tableam_tslot_get_minimal_tuple(TupleTableSlot *slot)
|
||||
{
|
||||
return g_tableam_routines[slot->tts_tupslotTableAm]->tslot_get_minimal_tuple(slot);
|
||||
}
|
||||
|
||||
|
||||
MinimalTuple tableam_tslot_copy_minimal_tuple(TupleTableSlot *slot)
|
||||
{
|
||||
return g_tableam_routines[slot->tts_tupslotTableAm]->tslot_copy_minimal_tuple(slot);
|
||||
}
|
||||
|
||||
void tableam_tslot_store_minimal_tuple(MinimalTuple mtup, TupleTableSlot *slot, bool shouldFree)
|
||||
{
|
||||
g_tableam_routines[slot->tts_tupslotTableAm]->tslot_store_minimal_tuple(mtup, slot, shouldFree);
|
||||
}
|
||||
|
||||
HeapTuple tableam_tslot_get_heap_tuple(TupleTableSlot *slot)
|
||||
{
|
||||
return g_tableam_routines[slot->tts_tupslotTableAm]->tslot_get_heap_tuple(slot);
|
||||
}
|
||||
|
||||
HeapTuple tableam_tslot_copy_heap_tuple(TupleTableSlot *slot)
|
||||
{
|
||||
return g_tableam_routines[slot->tts_tupslotTableAm]->tslot_copy_heap_tuple(slot);
|
||||
}
|
||||
|
||||
void tableam_tslot_store_tuple(Tuple tuple, TupleTableSlot *slot, Buffer buffer, bool shouldFree, bool batchMode)
|
||||
{
|
||||
g_tableam_routines[GetTabelAmIndexTuple(tuple)]->tslot_store_tuple(tuple, slot, buffer, shouldFree, batchMode);
|
||||
}
|
||||
|
||||
void tableam_tslot_getsomeattrs(TupleTableSlot *slot, int natts)
|
||||
{
|
||||
g_tableam_routines[slot->tts_tupslotTableAm]->tslot_getsomeattrs(slot, natts);
|
||||
}
|
||||
|
||||
void tableam_tslot_formbatch(TupleTableSlot* slot, VectorBatch* batch, int cur_rows, int natts)
|
||||
{
|
||||
g_tableam_routines[slot->tts_tupslotTableAm]->tslot_formbatch(slot, batch, cur_rows, natts);
|
||||
}
|
||||
|
||||
Datum tableam_tslot_getattr(TupleTableSlot *slot, int attnum, bool *isnull)
|
||||
{
|
||||
return g_tableam_routines[slot->tts_tupslotTableAm]->tslot_getattr(slot, attnum, isnull);
|
||||
}
|
||||
|
||||
void tableam_tslot_getallattrs(TupleTableSlot *slot)
|
||||
{
|
||||
return g_tableam_routines[slot->tts_tupslotTableAm]->tslot_getallattrs(slot);
|
||||
}
|
||||
|
||||
bool tableam_tslot_attisnull(TupleTableSlot *slot, int attnum)
|
||||
{
|
||||
return g_tableam_routines[slot->tts_tupslotTableAm]->tslot_attisnull(slot, attnum);
|
||||
}
|
||||
|
||||
Tuple tableam_tslot_get_tuple_from_slot(Relation relation, TupleTableSlot *slot)
|
||||
{
|
||||
slot->tts_tupleDescriptor->tdhasuids = RELATION_HAS_UIDS(relation);
|
||||
|
@ -1025,7 +950,7 @@ void HeapamTcapInsertLost(Relation relation, Snapshot snap)
|
|||
TvInsertLost(RelationGetRelid(relation), snap);
|
||||
}
|
||||
|
||||
const TableAmRoutine g_heapam_methods = {
|
||||
static const TableAmRoutine g_heapam_methods = {
|
||||
/* ------------------------------------------------------------------------
|
||||
* TABLE SLOT AM APIs
|
||||
* ------------------------------------------------------------------------
|
||||
|
@ -1216,11 +1141,11 @@ bool UHeapamTslotAttisnull(TupleTableSlot *slot, int attnum)
Tuple uheapam_tslot_get_tuple_from_slot(TupleTableSlot* slot)
{
    UHeapTuple utuple = NULL;
    if (slot->tts_tupslotTableAm != TAM_USTORE) {
    if (!TTS_TABLEAM_IS_USTORE(slot)) {
        tableam_tslot_getallattrs(slot); // here has some main difference.
        utuple = (UHeapTuple)tableam_tops_form_tuple(slot->tts_tupleDescriptor, slot->tts_values, slot->tts_isnull,
            UHEAP_TUPLE);
        slot->tts_tupslotTableAm = TAM_USTORE;
        slot->tts_tam_ops = TableAmUstore;
        utuple->tupInfo = 1;
        ExecStoreTuple((Tuple)utuple, slot, InvalidBuffer, true);
    } else {
@ -1400,7 +1325,7 @@ void UHeapamTopsUpdateTupleWithOid (Relation rel, Tuple tuple, TupleTableSlot *s
    if (RelationGetRelid(rel) != InvalidOid)
        ((UHeapTuple)tuple)->table_oid = RelationGetRelid(rel);

    if (slot->tts_tupslotTableAm != TAM_USTORE) {
    if (!TTS_TABLEAM_IS_USTORE(slot)) {
        /*
         * Global Partition Index stores the partition's tableOid with the index
         * tuple which is extracted from heap tuple of the slot in this case.
@ -1718,7 +1643,7 @@ void UheapamTcapInsertLost(Relation relation, Snapshot snap)

/* All the function is pointer to heap function now, need to abstract the logic and replace with ustore function
 * after. */
const TableAmRoutine g_ustoream_methods = {
static const TableAmRoutine g_ustoream_methods = {

    // XXXTAM: Currently heapam* methods are hacked to deal with uheap table methods.
    // separate them out into uheapam* and assign them below to the right am function pointer.
@ -1836,3 +1761,6 @@ const TableAmRoutine * const g_tableam_routines[] = {
    &g_heapam_methods,
    &g_ustoream_methods
};

const TableAmRoutine* TableAmHeap = &g_heapam_methods;
const TableAmRoutine* TableAmUstore = &g_ustoream_methods;
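The wrapper functions above move out of tableam.cpp: instead of indexing g_tableam_routines[] with the slot's TableAmType on every call, each slot now carries a pointer to its TableAmRoutine (exported as TableAmHeap or TableAmUstore) and dispatches through it directly. A minimal, self-contained sketch of the two dispatch styles follows; AmRoutine, Slot, ClearOld and ClearNew are simplified stand-ins for illustration, not the real openGauss structures.

#include <cstdio>

/* Simplified stand-ins for the openGauss types; not the real definitions. */
struct Slot;

struct AmRoutine {
    void (*tslot_clear)(Slot* slot);
};

static void HeapSlotClear(Slot*)   { std::puts("heap clear"); }
static void UstoreSlotClear(Slot*) { std::puts("ustore clear"); }

static const AmRoutine heap_methods   = { HeapSlotClear };
static const AmRoutine ustore_methods = { UstoreSlotClear };

/* Old style: an enum stored in the slot indexes a global routine table on every call. */
enum AmType { AM_HEAP = 0, AM_USTORE = 1 };
static const AmRoutine* const am_routines[] = { &heap_methods, &ustore_methods };

struct Slot {
    AmType am_type;          /* old field: enum, resolved per call */
    const AmRoutine* am_ops; /* new field: routine pointer, set once at slot creation */
};

static void ClearOld(Slot* s) { am_routines[s->am_type]->tslot_clear(s); } /* table load + index + call */
static void ClearNew(Slot* s) { s->am_ops->tslot_clear(s); }               /* single indirect call */

int main()
{
    Slot s = { AM_USTORE, &ustore_methods };
    ClearOld(&s);
    ClearNew(&s);
    return 0;
}

Exposing only the TableAmHeap/TableAmUstore pointers is also what allows g_heapam_methods and g_ustoream_methods to become static in the hunks above.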
@ -40,6 +40,7 @@
#include "access/ustore/knl_utuple.h"
#include "access/ustore/knl_utuptoaster.h"
#include "access/ustore/knl_whitebox_test.h"
#include "access/tableam.h"
#include <stdlib.h>

static Bitmapset *UHeapDetermineModifiedColumns(Relation relation, Bitmapset *interesting_cols, UHeapTuple oldtup,
@ -1833,7 +1834,7 @@ check_tup_satisfies_update:

    /* create the old tuple for caller */
    if (oldslot) {
        *oldslot = MakeSingleTupleTableSlot(relation->rd_att, false, TAM_USTORE);
        *oldslot = MakeSingleTupleTableSlot(relation->rd_att, false, TableAmUstore);
        TupleDesc rowDesc = (*oldslot)->tts_tupleDescriptor;

        UHeapTuple oldtupCopy = UHeapCopyTuple(&utuple);
@ -2616,7 +2617,7 @@ check_tup_satisfies_update:

    /* Till now, we know whether we will delete the old index */
    if (oldslot && (*modifiedIdxAttrs != NULL || !useInplaceUpdate)) {
        *oldslot = MakeSingleTupleTableSlot(relation->rd_att, false, TAM_USTORE);
        *oldslot = MakeSingleTupleTableSlot(relation->rd_att, false, TableAmUstore);
        TupleDesc rowDesc = (*oldslot)->tts_tupleDescriptor;

        UHeapTuple oldtupCopy = UHeapCopyTuple(&oldtup);
@ -3283,13 +3284,13 @@ static void TtsUHeapMaterialize(TupleTableSlot *slot)
{
    MemoryContext oldContext;

    Assert(!slot->tts_isempty);
    Assert(!TTS_EMPTY(slot));

    /* If already materialized nothing to do. */
    if (slot->tts_shouldFree)
    if (TTS_SHOULDFREE(slot))
        return;

    slot->tts_shouldFree = true;
    slot->tts_flags |= TTS_FLAG_SHOULDFREE;

    oldContext = MemoryContextSwitchTo(slot->tts_mcxt);
@ -3301,7 +3302,7 @@ static void TtsUHeapMaterialize(TupleTableSlot *slot)
        slot->tts_tuple = UHeapFormTuple(slot->tts_tupleDescriptor, slot->tts_values, slot->tts_isnull);

    /* Let the caller know this contains a UHeap tuple now */
    slot->tts_tupslotTableAm = TAM_USTORE;
    slot->tts_tam_ops = TableAmUstore;

    MemoryContextSwitchTo(oldContext);
@ -1314,8 +1314,8 @@ HeapTuple UHeapCopyHeapTuple(TupleTableSlot *slot)
{
    HeapTuple tuple;

    Assert(!slot->tts_isempty);
    Assert(slot->tts_tupslotTableAm == TAM_USTORE);
    Assert(!TTS_EMPTY(slot));
    Assert(TTS_TABLEAM_IS_USTORE(slot));

    UHeapSlotGetAllAttrs(slot);
@ -1342,20 +1342,20 @@ void UHeapSlotClear(TupleTableSlot *slot)
     * sanity checks
     */
    Assert(slot != NULL);
    Assert(slot->tts_tupslotTableAm == TAM_USTORE);
    Assert(TTS_TABLEAM_IS_USTORE(slot));

    /*
     * Free any old physical tuple belonging to the slot.
     */
    if (slot->tts_shouldFree && (UHeapTuple)slot->tts_tuple != NULL) {
    if (TTS_SHOULDFREE(slot) && (UHeapTuple)slot->tts_tuple != NULL) {
        UHeapFreeTuple(slot->tts_tuple);
        slot->tts_tuple = NULL;
        slot->tts_shouldFree = false;
        slot->tts_flags &= ~TTS_FLAG_SHOULDFREE;
    }

    if (slot->tts_shouldFreeMin) {
    if (TTS_SHOULDFREEMIN(slot)) {
        heap_free_minimal_tuple(slot->tts_mintuple);
        slot->tts_shouldFreeMin = false;
        slot->tts_flags &= ~TTS_FLAG_SHOULDFREEMIN;
    }
}
@ -1369,7 +1369,7 @@ void UHeapSlotClear(TupleTableSlot *slot)
 */
void UHeapSlotGetSomeAttrs(TupleTableSlot *slot, int attnum)
{
    Assert(slot->tts_tupslotTableAm == TAM_USTORE);
    Assert(TTS_TABLEAM_IS_USTORE(slot));

    /* Quick out if we have 'em all already */
    if (slot->tts_nvalid >= attnum) {
@ -1383,7 +1383,7 @@ void UHeapSlotGetSomeAttrs(TupleTableSlot *slot, int attnum)

void UHeapSlotFormBatch(TupleTableSlot* slot, VectorBatch* batch, int cur_rows, int attnum)
{
    Assert(slot->tts_tupslotTableAm == TAM_USTORE);
    Assert(TTS_TABLEAM_IS_USTORE(slot));

    /* Quick out if we have all already */
    if (slot->tts_nvalid >= attnum) {
@ -1479,7 +1479,7 @@ bool UHeapSlotAttIsNull(const TupleTableSlot *slot, int attnum)
    TupleDesc tupleDesc = slot->tts_tupleDescriptor;
    UHeapTuple uhtup = (UHeapTuple)slot->tts_tuple;

    Assert(slot->tts_tupslotTableAm == TAM_USTORE);
    Assert(TTS_TABLEAM_IS_USTORE(slot));

    /*
     * system attributes are handled by heap_attisnull
@ -1536,7 +1536,7 @@ bool UHeapSlotAttIsNull(const TupleTableSlot *slot, int attnum)
 */
void UHeapSlotGetAllAttrs(TupleTableSlot *slot)
{
    Assert(slot->tts_tupslotTableAm == TAM_USTORE);
    Assert(TTS_TABLEAM_IS_USTORE(slot));

    /* Quick out if we have 'em all already */
    if (slot->tts_nvalid == slot->tts_tupleDescriptor->natts) {
@ -1673,9 +1673,9 @@ MinimalTuple UHeapSlotCopyMinimalTuple(TupleTableSlot *slot)
     * sanity checks.
     */
    Assert(slot != NULL);
    Assert(!slot->tts_isempty);
    Assert(!TTS_EMPTY(slot));
    Assert(slot->tts_tupleDescriptor != NULL);
    Assert(slot->tts_tupslotTableAm == TAM_USTORE);
    Assert(TTS_TABLEAM_IS_USTORE(slot));

    UHeapSlotGetAllAttrs(slot);
@ -1698,7 +1698,7 @@ MinimalTuple UHeapSlotGetMinimalTuple(TupleTableSlot *slot)
     * sanity checks
     */
    Assert(slot != NULL);
    Assert(!slot->tts_isempty);
    Assert(!TTS_EMPTY(slot));

    /*
     * If we have a minimal physical tuple (local or not) then just return it.
@ -1715,7 +1715,7 @@ MinimalTuple UHeapSlotGetMinimalTuple(TupleTableSlot *slot)
     */
    MemoryContext oldContext = MemoryContextSwitchTo(slot->tts_mcxt);
    slot->tts_mintuple = UHeapSlotCopyMinimalTuple(slot);
    slot->tts_shouldFreeMin = true;
    slot->tts_flags |= TTS_FLAG_SHOULDFREEMIN;
    MemoryContextSwitchTo(oldContext);

    /*
@ -1743,16 +1743,16 @@ void UHeapSlotStoreMinimalTuple(MinimalTuple mtup, TupleTableSlot *slot, bool sh
    Assert(mtup != NULL);
    Assert(slot != NULL);
    Assert(slot->tts_tupleDescriptor != NULL);
    Assert(slot->tts_tupslotTableAm == TAM_USTORE);
    Assert(TTS_TABLEAM_IS_USTORE(slot));

    /*
     * Free any old physical tuple belonging to the slot.
     */
    if (slot->tts_shouldFree && (UHeapTuple)slot->tts_tuple != NULL) {
    if (TTS_SHOULDFREE(slot) && (UHeapTuple)slot->tts_tuple != NULL) {
        UHeapFreeTuple(slot->tts_tuple);
        slot->tts_tuple = NULL;
    }
    if (slot->tts_shouldFreeMin) {
    if (TTS_SHOULDFREEMIN(slot)) {
        heap_free_minimal_tuple(slot->tts_mintuple);
    }
@ -1767,9 +1767,13 @@ void UHeapSlotStoreMinimalTuple(MinimalTuple mtup, TupleTableSlot *slot, bool sh
    /*
     * Store the new tuple into the specified slot.
     */
    slot->tts_isempty = false;
    slot->tts_shouldFree = false;
    slot->tts_shouldFreeMin = shouldFree;
    slot->tts_flags &= ~TTS_FLAG_EMPTY;
    slot->tts_flags &= ~TTS_FLAG_SHOULDFREE;
    if (shouldFree)
        slot->tts_flags |= TTS_FLAG_SHOULDFREEMIN;
    else
        slot->tts_flags &= ~TTS_FLAG_SHOULDFREEMIN;

    slot->tts_tuple = &slot->tts_minhdr;
    slot->tts_mintuple = mtup;
@ -1778,7 +1782,7 @@ void UHeapSlotStoreMinimalTuple(MinimalTuple mtup, TupleTableSlot *slot, bool sh
    slot->tts_minhdr.t_data = (HeapTupleHeader)((char *)mtup - MINIMAL_TUPLE_OFFSET);

    /* This slot now contains a HEAP_TUPLE so make sure to let callers know how to read it */
    slot->tts_tupslotTableAm = TAM_HEAP;
    slot->tts_tam_ops = TableAmHeap;

    /* no need to set t_self or t_tableOid since we won't allow access */
    /* Mark extracted state invalid */
@ -1798,12 +1802,12 @@ void UHeapSlotStoreUHeapTuple(UHeapTuple utuple, TupleTableSlot *slot, bool shou
     * sanity checks
     */
    Assert(utuple != NULL && utuple->tupTableType == UHEAP_TUPLE);
    Assert(slot != NULL && slot->tts_tupslotTableAm == TAM_USTORE);
    Assert(slot != NULL && TTS_TABLEAM_IS_USTORE(slot));
    Assert(slot->tts_tupleDescriptor != NULL);

    if (slot->tts_shouldFreeMin) {
    if (TTS_SHOULDFREEMIN(slot)) {
        heap_free_minimal_tuple(slot->tts_mintuple);
        slot->tts_shouldFreeMin = false;
        slot->tts_flags &= ~TTS_FLAG_SHOULDFREEMIN;
    }

    UHeapSlotClear(slot);
@ -1811,9 +1815,12 @@ void UHeapSlotStoreUHeapTuple(UHeapTuple utuple, TupleTableSlot *slot, bool shou
    /*
     * Store the new tuple into the specified slot.
     */
    slot->tts_isempty = false;
    slot->tts_shouldFree = shouldFree;
    slot->tts_shouldFreeMin = false;
    slot->tts_flags &= ~TTS_FLAG_EMPTY;
    if (shouldFree)
        slot->tts_flags |= TTS_FLAG_SHOULDFREE;
    else
        slot->tts_flags &= ~TTS_FLAG_SHOULDFREE;
    slot->tts_flags &= ~TTS_FLAG_SHOULDFREEMIN;
    slot->tts_tuple = utuple;
    slot->tts_mintuple = NULL;
@ -1829,14 +1836,14 @@ void UHeapSlotStoreUHeapTuple(UHeapTuple utuple, TupleTableSlot *slot, bool shou
 */
Tuple UHeapMaterialize(TupleTableSlot *slot)
{
    Assert(!slot->tts_isempty);
    Assert(slot->tts_tupslotTableAm == TAM_USTORE);
    Assert(!TTS_EMPTY(slot));
    Assert(TTS_TABLEAM_IS_USTORE(slot));
    Assert(slot->tts_tupleDescriptor != NULL);
    /*
     * If we have a regular physical tuple, and it's locally palloc'd, we have
     * nothing to do.
     */
    if (slot->tts_tuple && slot->tts_shouldFree) {
    if (slot->tts_tuple && TTS_SHOULDFREE(slot)) {
        return slot->tts_tuple;
    }
@ -1853,7 +1860,7 @@ Tuple UHeapMaterialize(TupleTableSlot *slot)
    } else {
        slot->tts_tuple = UHeapFormTuple(slot->tts_tupleDescriptor, slot->tts_values, slot->tts_isnull);
    }
    slot->tts_shouldFree = true;
    slot->tts_flags |= TTS_FLAG_SHOULDFREE;
    MemoryContextSwitchTo(old_context);

    /*
@ -19,6 +19,7 @@

#include "access/genam.h"
#include "access/heapam.h"
#include "access/tableam.h"
#include "nodes/relation.h"
#include "access/tuptoaster.h"
#include "access/ustore/knl_utuptoaster.h"
@ -56,7 +57,7 @@ Oid UHeapGetNewOidWithIndex(Relation relation, Oid indexId, AttrNumber oidcolumn
    ScanKeyData key;
    bool collides = false;
    Assert(RelationIsUstoreFormat(relation) || RelationIsToast(relation));
    TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(relation), false, relation->rd_tam_type);
    TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(relation), false, GetTableAmRoutine(relation->rd_tam_type));
    /* Generate new OIDs until we find one not in the table */
    do {
        CHECK_FOR_INTERRUPTS();
@ -897,7 +898,7 @@ static void UHeapToastDeleteDatum(Relation rel, Datum value, int options)
    /* The toast table of ustore table should also be of ustore type */
    Assert(RelationIsUstoreFormat(toastrel));
    /* should index must be ustore format ? */
    TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(toastrel), false, toastrel->rd_tam_type);
    TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(toastrel), false, GetTableAmRoutine(toastrel->rd_tam_type));

    /*
     * Setup a scan key to find chunks with matching va_valueid
@ -960,7 +961,7 @@ struct varlena *UHeapInternalToastFetchDatum(struct varatt_external toastPointer
    SET_VARSIZE(result, ressize + VARHDRSZ);

    toastTupDesc = toastrel->rd_att;
    TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(toastrel), false, toastrel->rd_tam_type);
    TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(toastrel), false, GetTableAmRoutine(toastrel->rd_tam_type));

    /*
     * Setup a scan key to fetch from the index by va_valueid
@ -1132,7 +1133,7 @@ struct varlena *UHeapInternalToastFetchDatumSlice(struct varatt_external toastPo
     * Open the toast relation and its index
     */
    toastTupDesc = toastrel->rd_att;
    TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(toastrel), false, toastrel->rd_tam_type);
    TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(toastrel), false, GetTableAmRoutine(toastrel->rd_tam_type));

    /*
     * Setup a scan key to fetch from the index. This is either two keys or
@ -1267,7 +1268,7 @@ static bool UHeapToastRelValueidExists(Relation toastrel, Oid valueid)
    SysScanDesc toastscan;
    TupleTableSlot *slot = NULL;
    Assert(RelationIsUstoreFormat(toastrel));
    slot = MakeSingleTupleTableSlot(RelationGetDescr(toastrel), false, toastrel->rd_tam_type);
    slot = MakeSingleTupleTableSlot(RelationGetDescr(toastrel), false, GetTableAmRoutine(toastrel->rd_tam_type));

    /*
     * Setup a scan key to find chunks with matching va_valueid
@ -528,7 +528,7 @@ ForeignScanState *buildRelatedStateInfo(Relation relation, DistFdwFileSegment *s
    ;

    /* setup tuple slot */
    scanTupleSlot = MakeTupleTableSlot(true, tupleDescriptor->tdTableAmType);
    scanTupleSlot = MakeTupleTableSlot(true, GetTableAmRoutine(tupleDescriptor->tdTableAmType));
    scanTupleSlot->tts_tupleDescriptor = tupleDescriptor;
    scanTupleSlot->tts_values = columnValues;
    scanTupleSlot->tts_isnull = columnNulls;
@ -643,7 +643,7 @@ static int distAcquireSampleRows(Relation relation, int logLevel, HeapTuple *sam
    (void)MemoryContextSwitchTo(oldContext);

    /* if there are no more records to read, break */
    if (scanTupleSlot->tts_isempty) {
    if (TTS_EMPTY(scanTupleSlot)) {
        break;
    }
@ -1612,7 +1612,7 @@ retry:
    /*
     * Optimize foreign scan by using informational constraint.
     */
    if (((ForeignScan *)node->ss.ps.plan)->scan.predicate_pushdown_optimized && false == slot->tts_isempty) {
    if (((ForeignScan *)node->ss.ps.plan)->scan.predicate_pushdown_optimized && !TTS_EMPTY(slot)) {
        /*
         * If we find a suitable tuple, set is_scan_end value is true.
         * It means that we do not find suitable tuple in the next iteration,
@ -176,7 +176,7 @@ static EState *create_estate_for_relation(LogicalRepRelMapEntry *rel)

    /* Triggers might need a slot */
    if (resultRelInfo->ri_TrigDesc)
        estate->es_trig_tuple_slot = ExecInitExtraTupleSlot(estate, rel->localrel->rd_tam_type);
        estate->es_trig_tuple_slot = ExecInitExtraTupleSlot(estate, GetTableAmRoutine(rel->localrel->rd_tam_type));

    /* Prepare to catch AFTER triggers. */
    AfterTriggerBeginQuery();
@ -558,7 +558,7 @@ static void apply_handle_insert(StringInfo s)

    /* Initialize the executor state. */
    estate = create_estate_for_relation(rel);
    remoteslot = ExecInitExtraTupleSlot(estate, rel->localrel->rd_tam_type);
    remoteslot = ExecInitExtraTupleSlot(estate, GetTableAmRoutine(rel->localrel->rd_tam_type));
    ExecSetSlotDescriptor(remoteslot, RelationGetDescr(rel->localrel));

    /* Input functions may need an active snapshot, so get one */
@ -676,9 +676,9 @@ static void apply_handle_update(StringInfo s)

    /* Initialize the executor state. */
    estate = create_estate_for_relation(rel);
    remoteslot = ExecInitExtraTupleSlot(estate, rel->localrel->rd_tam_type);
    remoteslot = ExecInitExtraTupleSlot(estate, GetTableAmRoutine(rel->localrel->rd_tam_type));
    ExecSetSlotDescriptor(remoteslot, RelationGetDescr(rel->localrel));
    localslot = ExecInitExtraTupleSlot(estate, rel->localrel->rd_tam_type);
    localslot = ExecInitExtraTupleSlot(estate, GetTableAmRoutine(rel->localrel->rd_tam_type));
    ExecSetSlotDescriptor(localslot, RelationGetDescr(rel->localrel));
    EvalPlanQualInit(&epqstate, estate, NULL, NIL, -1);
@ -786,9 +786,9 @@ static void apply_handle_delete(StringInfo s)

    /* Initialize the executor state. */
    estate = create_estate_for_relation(rel);
    remoteslot = ExecInitExtraTupleSlot(estate, rel->localrel->rd_tam_type);
    remoteslot = ExecInitExtraTupleSlot(estate, GetTableAmRoutine(rel->localrel->rd_tam_type));
    ExecSetSlotDescriptor(remoteslot, RelationGetDescr(rel->localrel));
    localslot = ExecInitExtraTupleSlot(estate, rel->localrel->rd_tam_type);
    localslot = ExecInitExtraTupleSlot(estate, GetTableAmRoutine(rel->localrel->rd_tam_type));
    ExecSetSlotDescriptor(localslot, RelationGetDescr(rel->localrel));
    EvalPlanQualInit(&epqstate, estate, NULL, NIL, -1);
@ -796,7 +796,7 @@ static void TvUheapInsertLostImpl(Relation rel, Relation partRel, Partition p,

    Relation relRel = (partRel != NULL) ? partRel : rel;
    /* Set up a tuple slot too */
    myslot = ExecInitExtraTupleSlot(estate, TAM_USTORE);
    myslot = ExecInitExtraTupleSlot(estate, TableAmUstore);
    ExecSetSlotDescriptor(myslot, RelationGetDescr(relRel));

    /* Switch into its memory context */
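The call-site hunks above (uheap toast, FDW, logical replication apply, tcap) all follow one mechanical pattern: where a raw TableAmType used to be passed (TAM_USTORE, relation->rd_tam_type, tupleDescriptor->tdTableAmType), the caller now passes the matching TableAmRoutine pointer, either a predefined handle or the result of GetTableAmRoutine(). A hedged, self-contained sketch of that signature change follows; MakeSlot and the simplified types stand in for the real slot constructors (MakeSingleTupleTableSlot, ExecInitExtraTupleSlot and friends) purely for illustration.

#include <cassert>

/* Simplified stand-ins; not the real openGauss declarations. */
struct TableAmRoutine { int id; };

static const TableAmRoutine heap_routine   = { 0 };
static const TableAmRoutine ustore_routine = { 1 };
static const TableAmRoutine* TableAmHeap   = &heap_routine;
static const TableAmRoutine* TableAmUstore = &ustore_routine;

enum TableAmType { TAM_HEAP = 0, TAM_USTORE = 1 };

/* Adapter for callers that still hold the enum in relation/tupdesc metadata. */
static const TableAmRoutine* GetTableAmRoutine(TableAmType type)
{
    return type == TAM_HEAP ? TableAmHeap : TableAmUstore;
}

struct TupleTableSlot { const TableAmRoutine* tts_tam_ops; };

/* New-style constructor: takes the routine pointer (default heap) and stores it in the slot. */
static TupleTableSlot MakeSlot(const TableAmRoutine* tam_ops = TableAmHeap)
{
    TupleTableSlot slot = { tam_ops };
    return slot;
}

int main()
{
    /* Call sites that used to pass TAM_USTORE or rd_tam_type now pass the pointer form. */
    TupleTableSlot a = MakeSlot(TableAmUstore);
    TupleTableSlot b = MakeSlot(GetTableAmRoutine(TAM_HEAP));
    TupleTableSlot c = MakeSlot(); /* default argument keeps unannotated call sites valid */
    assert(a.tts_tam_ops == TableAmUstore);
    assert(b.tts_tam_ops == TableAmHeap);
    assert(c.tts_tam_ops == TableAmHeap);
    return 0;
}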
@ -496,25 +496,93 @@ typedef struct TableAmRoutine {
    void (*tcap_insert_lost)(Relation relation, Snapshot snap);
} TableAmRoutine;

extern const TableAmRoutine * const g_tableam_routines[];
extern void HeapamScanIndexFetchEnd(IndexFetchTableData *scan);
extern void heapam_index_fetch_reset(IndexFetchTableData *scan);
extern IndexFetchTableData *HeapamScanIndexFetchBegin(Relation rel);

extern const TableAmRoutine *GetTableAmRoutine(TableAmType type);
extern void tableam_tslot_clear(TupleTableSlot *slot);
extern HeapTuple tableam_tslot_materialize(TupleTableSlot *slot);
extern MinimalTuple tableam_tslot_get_minimal_tuple(TupleTableSlot *slot);
extern MinimalTuple tableam_tslot_copy_minimal_tuple(TupleTableSlot *slot);
extern void tableam_tslot_store_minimal_tuple(MinimalTuple mtup, TupleTableSlot *slot, bool shouldFree);
extern HeapTuple tableam_tslot_get_heap_tuple(TupleTableSlot *slot);
extern HeapTuple tableam_tslot_copy_heap_tuple(TupleTableSlot *slot);
extern void tableam_tslot_store_tuple(Tuple tuple, TupleTableSlot *slot, Buffer buffer, bool shouldFree, bool batchMode);
extern void tableam_tslot_getsomeattrs(TupleTableSlot *slot, int natts);
extern Datum tableam_tslot_getattr(TupleTableSlot *slot, int attnum, bool *isnull);
extern void tableam_tslot_getallattrs(TupleTableSlot *slot);
extern void tableam_tslot_formbatch(TupleTableSlot* slot, VectorBatch* batch, int cur_rows, int natts);
extern bool tableam_tslot_attisnull(TupleTableSlot *slot, int attnum);

static inline const TableAmRoutine* GetTableAmRoutine(TableAmType type)
{
    Assert(type == TAM_HEAP || type == TAM_USTORE);
    return type == TAM_HEAP ? TableAmHeap : TableAmUstore;
}

static inline TableAmType GetTableAmType(const TableAmRoutine* ops)
{
    Assert(ops == TableAmHeap || ops == TableAmUstore);
    return ops == TableAmHeap ? TAM_HEAP : TAM_USTORE;
}

/*
 * Clears the contents of the table slot that contains heap table tuple data.
 */
static inline void tableam_tslot_clear(TupleTableSlot *slot)
{
    return slot->tts_tam_ops->tslot_clear(slot);
}

static inline HeapTuple tableam_tslot_materialize(TupleTableSlot *slot)
{
    return slot->tts_tam_ops->tslot_materialize(slot);
}

static inline MinimalTuple tableam_tslot_get_minimal_tuple(TupleTableSlot *slot)
{
    return slot->tts_tam_ops->tslot_get_minimal_tuple(slot);
}

static inline MinimalTuple tableam_tslot_copy_minimal_tuple(TupleTableSlot *slot)
{
    return slot->tts_tam_ops->tslot_copy_minimal_tuple(slot);
}

static inline void tableam_tslot_store_minimal_tuple(MinimalTuple mtup, TupleTableSlot *slot, bool shouldFree)
{
    slot->tts_tam_ops->tslot_store_minimal_tuple(mtup, slot, shouldFree);
}

static inline HeapTuple tableam_tslot_get_heap_tuple(TupleTableSlot *slot)
{
    return slot->tts_tam_ops->tslot_get_heap_tuple(slot);
}

static inline HeapTuple tableam_tslot_copy_heap_tuple(TupleTableSlot *slot)
{
    return slot->tts_tam_ops->tslot_copy_heap_tuple(slot);
}

static inline void tableam_tslot_store_tuple(Tuple tuple, TupleTableSlot *slot, Buffer buffer, bool shouldFree, bool batchMode)
{
    Assert(slot->tts_tam_ops == GetTableAmRoutine(TableAmType(GetTabelAmIndexTuple(tuple))));
    slot->tts_tam_ops->tslot_store_tuple(tuple, slot, buffer, shouldFree, batchMode);
}

static inline void tableam_tslot_getsomeattrs(TupleTableSlot *slot, int natts)
{
    slot->tts_tam_ops->tslot_getsomeattrs(slot, natts);
}

static inline void tableam_tslot_formbatch(TupleTableSlot* slot, VectorBatch* batch, int cur_rows, int natts)
{
    slot->tts_tam_ops->tslot_formbatch(slot, batch, cur_rows, natts);
}

static inline Datum tableam_tslot_getattr(TupleTableSlot *slot, int attnum, bool *isnull)
{
    return slot->tts_tam_ops->tslot_getattr(slot, attnum, isnull);
}

static inline void tableam_tslot_getallattrs(TupleTableSlot *slot)
{
    return slot->tts_tam_ops->tslot_getallattrs(slot);
}

static inline bool tableam_tslot_attisnull(TupleTableSlot *slot, int attnum)
{
    return slot->tts_tam_ops->tslot_attisnull(slot, attnum);
}

extern Tuple tableam_tslot_get_tuple_from_slot(Relation relation, TupleTableSlot *slot);
extern Datum tableam_tops_getsysattr(Tuple tup, int attnum, TupleDesc tuple_desc, bool *isnull,
    Buffer buf = InvalidBuffer);
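Since the slot now records a routine pointer while relations and tuple descriptors still record a TableAmType, the header keeps both conversions inline: GetTableAmRoutine() maps the enum to the pointer, GetTableAmType() maps it back, and the tableam_tslot_store_tuple() wrapper asserts that the tuple's AM agrees with the slot's. A small self-contained sketch of that round trip and consistency check follows; Routine, RoutineFromType, TypeFromRoutine and StoreTuple are simplified stand-ins used only to mirror the shape of the code above.

#include <cassert>

/* Simplified stand-ins; not the real openGauss declarations. */
struct Routine { int id; };
static const Routine heap_impl   = { 0 };
static const Routine ustore_impl = { 1 };
static const Routine* AmHeap   = &heap_impl;
static const Routine* AmUstore = &ustore_impl;

enum AmType { AM_HEAP = 0, AM_USTORE = 1 };

static inline const Routine* RoutineFromType(AmType t)   /* mirrors GetTableAmRoutine() */
{
    return t == AM_HEAP ? AmHeap : AmUstore;
}

static inline AmType TypeFromRoutine(const Routine* ops) /* mirrors GetTableAmType() */
{
    assert(ops == AmHeap || ops == AmUstore);
    return ops == AmHeap ? AM_HEAP : AM_USTORE;
}

struct Tuple { AmType am; };
struct Slot  { const Routine* ops; };

/* Mirrors the Assert added in tableam_tslot_store_tuple(): slot and tuple must agree. */
static void StoreTuple(const Tuple* tup, Slot* slot)
{
    assert(slot->ops == RoutineFromType(tup->am));
    /* ... actual store would happen here ... */
}

int main()
{
    Slot s = { AmUstore };
    Tuple t = { AM_USTORE };
    StoreTuple(&t, &s);                          /* consistent: passes the check */
    assert(TypeFromRoutine(s.ops) == AM_USTORE); /* enum -> pointer -> enum round trip */
    return 0;
}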
@ -41,6 +41,14 @@ typedef enum tableAmType
    TAM_USTORE = 1,
} TableAmType;

/*
 * Predefined TableAmRoutine for various types of table AM. The
 * same are used to identify the table AM of a given slot.
 */
struct TableAmRoutine;
extern const TableAmRoutine* TableAmHeap;
extern const TableAmRoutine* TableAmUstore;

/* index page split methods */
#define INDEXSPLIT_NO_DEFAULT 0 /* default split method, aimed at equal split */
#define INDEXSPLIT_NO_INSERTPT 1 /* insertpt */
@ -388,7 +388,7 @@ ExecProject(ProjectionInfo *projInfo)
     * Successfully formed a result row. Mark the result slot as containing a
     * valid virtual tuple (inlined version of ExecStoreVirtualTuple()).
     */
    slot->tts_isempty = false;
    slot->tts_flags &= ~TTS_FLAG_EMPTY;
    slot->tts_nvalid = slot->tts_tupleDescriptor->natts;

    return slot;
@ -448,9 +448,9 @@ extern void ExecEvalParamExternTableOfIndex(Node* node, ExecTableOfIndexInfo* ex
/*
 * prototypes from functions in execTuples.c
 */
extern void ExecInitResultTupleSlot(EState* estate, PlanState* planstate, TableAmType tam = TAM_HEAP);
extern void ExecInitScanTupleSlot(EState* estate, ScanState* scanstate, TableAmType tam = TAM_HEAP);
extern TupleTableSlot* ExecInitExtraTupleSlot(EState* estate, TableAmType tam = TAM_HEAP);
extern void ExecInitResultTupleSlot(EState* estate, PlanState* planstate, const TableAmRoutine* tam_ops = TableAmHeap);
extern void ExecInitScanTupleSlot(EState* estate, ScanState* scanstate, const TableAmRoutine* tam_ops = TableAmHeap);
extern TupleTableSlot* ExecInitExtraTupleSlot(EState* estate, const TableAmRoutine* tam_ops = TableAmHeap);
extern TupleTableSlot* ExecInitNullTupleSlot(EState* estate, TupleDesc tupType);
extern TupleDesc ExecTypeFromTL(List* targetList, bool hasoid, bool markdropped = false, TableAmType tam = TAM_HEAP);
extern TupleDesc ExecCleanTypeFromTL(List* targetList, bool hasoid, TableAmType tam = TAM_HEAP);
@ -65,11 +65,11 @@
 * ie, only as needed. This serves to avoid repeated extraction of data
 * from the physical tuple.
 *
 * A TupleTableSlot can also be "empty", holding no valid data. This is
 * the only valid state for a freshly-created slot that has not yet had a
 * tuple descriptor assigned to it. In this state, tts_isempty must be
 * TRUE, tts_shouldFree FALSE, tts_tuple NULL, tts_buffer InvalidBuffer,
 * and tts_nvalid zero.
 * A TupleTableSlot can also be "empty", indicated by flag TTS_EMPTY set in
 * tts_flags, holding no valid data. This is the only valid state for a
 * freshly-created slot that has not yet had a tuple descriptor assigned to it.
 * In this state, TTS_SHOULDFREE should not be set in tts_flags, tts_tuple must
 * be NULL, tts_buffer InvalidBuffer, and tts_nvalid zero.
 *
 * The tupleDescriptor is simply referenced, not copied, by the TupleTableSlot
 * code. The caller of ExecSetSlotDescriptor() is responsible for providing
@ -79,8 +79,9 @@
 * mechanism to do more. However, the slot will increment the tupdesc
 * reference count if a reference-counted tupdesc is supplied.)
 *
 * When tts_shouldFree is true, the physical tuple is "owned" by the slot
 * and should be freed when the slot's reference to the tuple is dropped.
 * When TTS_SHOULDFREE is set in tts_flags, the physical tuple is "owned" by
 * the slot and should be freed when the slot's reference to the tuple is
 * dropped.
 *
 * If tts_buffer is not InvalidBuffer, then the slot is holding a pin
 * on the indicated buffer page; drop the pin when we release the
@ -106,55 +107,83 @@
 * MINIMAL_TUPLE_OFFSET bytes before tts_mintuple. This allows column
 * extraction to treat the case identically to regular physical tuples.
 *
 * tts_slow/tts_off are saved state for slot_deform_tuple, and should not
 * be touched by any other code.
 * TTS_SLOW flag in tts_flags and tts_off are saved state for
 * slot_deform_tuple, and should not be touched by any other code.
 * ----------
 */

/* true = slot is empty */
#define TTS_FLAG_EMPTY (1 << 1)
#define TTS_EMPTY(slot) (((slot)->tts_flags & TTS_FLAG_EMPTY) != 0)

/* should pfree tts_tuple? */
#define TTS_FLAG_SHOULDFREE (1 << 2)
#define TTS_SHOULDFREE(slot) (((slot)->tts_flags & TTS_FLAG_SHOULDFREE) != 0)

/* should pfree tts_mintuple? */
#define TTS_FLAG_SHOULDFREEMIN (1 << 3)
#define TTS_SHOULDFREEMIN(slot) (((slot)->tts_flags & TTS_FLAG_SHOULDFREEMIN) != 0)

/* saved state for slot_deform_tuple */
#define TTS_FLAG_SLOW (1 << 4)
#define TTS_SLOW(slot) (((slot)->tts_flags & TTS_FLAG_SLOW) != 0)

/*
 * openGauss flags
 */

/* should pfree tts_dataRow? */
#define TTS_FLAG_SHOULDFREE_ROW (1 << 12)
#define TTS_SHOULDFREE_ROW(slot) (((slot)->tts_flags & TTS_FLAG_SHOULDFREE_ROW) != 0)

typedef struct TupleTableSlot {
    NodeTag type;
    bool tts_isempty;        /* true = slot is empty */
    bool tts_shouldFree;     /* should pfree tts_tuple? */
    bool tts_shouldFreeMin;  /* should pfree tts_mintuple? */
    bool tts_slow;           /* saved state for slot_deform_tuple */

    uint16 tts_flags;        /* Boolean states */
    int tts_nvalid;          /* # of valid values in tts_values */
    const TableAmRoutine* tts_tam_ops; /* implementation of table AM */
    Tuple tts_tuple;         /* physical tuple, or NULL if virtual */

    TupleDesc tts_tupleDescriptor; /* slot's tuple descriptor */
    MemoryContext tts_mcxt;  /* slot itself is in this context */
    Buffer tts_buffer;       /* tuple's buffer, or InvalidBuffer */
    long tts_off;            /* saved state for slot_deform_tuple */
    Datum* tts_values;       /* current per-attribute values */
    bool* tts_isnull;        /* current per-attribute isnull flags */

    MinimalTuple tts_mintuple; /* minimal tuple, or NULL if none */
    HeapTupleData tts_minhdr;  /* workspace for minimal-tuple-only case */

    long tts_meta_off;       /* saved state for slot_deform_cmpr_tuple */
    Datum* tts_lobPointers;
#ifdef PGXC
    /*
     * PGXC extension to support tuples sent from remote Datanode.
     */
    char* tts_dataRow;       /* Tuple data in DataRow format */
    int tts_dataLen;         /* Actual length of the data row */
    bool tts_shouldFreeRow;  /* should pfree tts_dataRow? */
    struct AttInMetadata* tts_attinmeta; /* store here info to extract values from the DataRow */
    Oid tts_xcnodeoid;       /* Oid of node from where the datarow is fetched */
    MemoryContext tts_per_tuple_mcxt;
#endif
    TupleDesc tts_tupleDescriptor; /* slot's tuple descriptor */
    MemoryContext tts_mcxt;  /* slot itself is in this context */
    Buffer tts_buffer;       /* tuple's buffer, or InvalidBuffer */
    int tts_nvalid;          /* # of valid values in tts_values */
    Datum* tts_values;       /* current per-attribute values */
    bool* tts_isnull;        /* current per-attribute isnull flags */
    Datum* tts_lobPointers;
    MinimalTuple tts_mintuple; /* minimal tuple, or NULL if none */
    HeapTupleData tts_minhdr;  /* workspace for minimal-tuple-only case */
    long tts_off;            /* saved state for slot_deform_tuple */
    long tts_meta_off;       /* saved state for slot_deform_cmpr_tuple */
    TableAmType tts_tupslotTableAm; /* slot's tuple table type */

} TupleTableSlot;

#define TTS_HAS_PHYSICAL_TUPLE(slot) ((slot)->tts_tuple != NULL && (slot)->tts_tuple != &((slot)->tts_minhdr))

#define TTS_TABLEAM_IS_HEAP(slot) ((slot)->tts_tam_ops == TableAmHeap)
#define TTS_TABLEAM_IS_USTORE(slot) ((slot)->tts_tam_ops == TableAmUstore)

/*
 * TupIsNull -- is a TupleTableSlot empty?
 */
#define TupIsNull(slot) ((slot) == NULL || (slot)->tts_isempty)
#define TupIsNull(slot) ((slot) == NULL || TTS_EMPTY(slot))

/* in executor/execTuples.c */
extern TupleTableSlot* MakeTupleTableSlot(bool has_tuple_mcxt = false, TableAmType tupslotTableAm = TAM_HEAP);
extern TupleTableSlot* ExecAllocTableSlot(List** tupleTable, TableAmType tupslotTableAm = TAM_HEAP);
extern TupleTableSlot* MakeTupleTableSlot(bool has_tuple_mcxt = false, const TableAmRoutine* tam_ops = TableAmHeap);
extern TupleTableSlot* ExecAllocTableSlot(List** tupleTable, const TableAmRoutine* tam_ops = TableAmHeap);
extern void ExecResetTupleTable(List* tupleTable, bool shouldFree);
extern TupleTableSlot* MakeSingleTupleTableSlot(TupleDesc tupdesc, bool allocSlotCxt = false, TableAmType tupslotTableAm = TAM_HEAP);
extern TupleTableSlot* MakeSingleTupleTableSlot(TupleDesc tupdesc, bool allocSlotCxt = false, const TableAmRoutine* tam_ops = TableAmHeap);
extern void ExecDropSingleTupleTableSlot(TupleTableSlot* slot);
extern void ExecSetSlotDescriptor(TupleTableSlot* slot, TupleDesc tupdesc);
extern TupleTableSlot* ExecStoreTuple(Tuple tuple, TupleTableSlot* slot, Buffer buffer, bool shouldFree);
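The tuptable.h change above collapses the four per-slot booleans (tts_isempty, tts_shouldFree, tts_shouldFreeMin, tts_slow) into a single uint16 tts_flags word tested and updated through the TTS_* macros. A minimal self-contained sketch of that pattern follows; it reuses the flag layout from the header but against a stripped-down MiniSlot struct that exists only for this illustration.

#include <cassert>
#include <cstdint>

/* Stripped-down slot: only the flags word from the real TupleTableSlot. */
typedef struct MiniSlot {
    uint16_t tts_flags;
} MiniSlot;

/* Same flag layout and accessor style as the header above. */
#define TTS_FLAG_EMPTY (1 << 1)
#define TTS_EMPTY(slot) (((slot)->tts_flags & TTS_FLAG_EMPTY) != 0)

#define TTS_FLAG_SHOULDFREE (1 << 2)
#define TTS_SHOULDFREE(slot) (((slot)->tts_flags & TTS_FLAG_SHOULDFREE) != 0)

int main()
{
    MiniSlot slot = { TTS_FLAG_EMPTY };            /* freshly created: empty, owns nothing */
    assert(TTS_EMPTY(&slot) && !TTS_SHOULDFREE(&slot));

    /* Storing a locally palloc'd tuple: clear EMPTY, set SHOULDFREE (cf. UHeapSlotStoreUHeapTuple). */
    slot.tts_flags &= ~TTS_FLAG_EMPTY;
    slot.tts_flags |= TTS_FLAG_SHOULDFREE;
    assert(!TTS_EMPTY(&slot) && TTS_SHOULDFREE(&slot));

    /* Resetting: drop ownership of the tuple and mark the slot empty again. */
    slot.tts_flags &= ~TTS_FLAG_SHOULDFREE;
    slot.tts_flags |= TTS_FLAG_EMPTY;
    assert(TTS_EMPTY(&slot) && !TTS_SHOULDFREE(&slot));
    return 0;
}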
@ -2787,7 +2787,7 @@ typedef struct GroupingIdExprState {
    } \
} while (0)

extern TupleTableSlot* ExecMakeTupleSlot(Tuple tuple, TableScanDesc tableScan, TupleTableSlot* slot, TableAmType tableAm);
extern TupleTableSlot* ExecMakeTupleSlot(Tuple tuple, TableScanDesc tableScan, TupleTableSlot* slot, const TableAmRoutine* tam_ops);

/*
 * When the global partition index is used for bitmap scanning,