Commit bfbd0f61 authored by Heikki Linnakangas

Remove misc unused functions.

Identified by 'cppcheck'.
Parent 5aabe7fb
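For reference, cppcheck flags never-called functions through its unusedFunction check; a minimal invocation (the source path is illustrative, not taken from this commit) looks roughly like:

    # cppcheck writes findings to stderr; --quiet suppresses progress output
    cppcheck --enable=unusedFunction --quiet src/backend/ 2> cppcheck-unused.txt

The reported candidates, such as the functions deleted in the hunks below, still need manual review before removal.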
......@@ -68,22 +68,6 @@ static AOCSFileSegInfo **GetAllAOCSFileSegInfo_pg_aocsseg_rel(
Snapshot appendOnlyMetaDataSnapshot,
int32 *totalseg);
AOCSFileSegInfo *
NewAOCSFileSegInfo(int32 segno, int32 nvp)
{
AOCSFileSegInfo *seginfo;
seginfo = (AOCSFileSegInfo *) palloc0(aocsfileseginfo_size(nvp));
seginfo ->segno = segno;
seginfo ->vpinfo.nEntry = nvp;
seginfo ->state = AOSEG_STATE_DEFAULT;
/* New segments are always created in the latest format */
seginfo ->formatversion = AORelationVersion_GetLatest();
return seginfo;
}
void
InsertInitialAOCSFileSegInfo(Relation prel, int32 segno, int32 nvp)
{
......
......@@ -70,20 +70,6 @@ static FileSegInfo **GetAllFileSegInfo_pg_aoseg_rel(char *relationName, Relation
* ------------------------------------------------------------------------
*/
FileSegInfo *
NewFileSegInfo(int segno)
{
FileSegInfo *fsinfo;
fsinfo = (FileSegInfo *) palloc0(sizeof(FileSegInfo));
fsinfo->segno = segno;
fsinfo->state = AOSEG_STATE_DEFAULT;
/* New segments are always created in the latest format */
fsinfo->formatversion = AORelationVersion_GetLatest();
return fsinfo;
}
void
ValidateAppendonlySegmentDataBeforeStorage(int segno)
{
......
......@@ -2130,42 +2130,6 @@ try_redistribute(PlannerInfo *root, CdbpathMfjRel *g, CdbpathMfjRel *o,
return false;
}
void
failIfUpdateTriggers(Oid relid)
{
Relation relation;
/* Suppose we already hold locks before caller */
relation = relation_open(relid, NoLock);
if (relation->rd_rel->relhastriggers)
{
bool found = false;
if (relation->trigdesc == NULL)
RelationBuildTriggers(relation);
if (relation->trigdesc)
{
for (int i = 0; i < relation->trigdesc->numtriggers && !found; i++)
{
Trigger trigger = relation->trigdesc->triggers[i];
found = trigger_enabled(trigger.tgoid) &&
(get_trigger_type(trigger.tgoid) & TRIGGER_TYPE_UPDATE) == TRIGGER_TYPE_UPDATE;
if (found)
break;
}
}
/* GPDB_96_MERGE_FIXME: Why is this not allowed? */
if (found || child_triggers(relation->rd_id, TRIGGER_TYPE_UPDATE))
ereport(ERROR,
(errcode(ERRCODE_GP_FEATURE_NOT_YET),
errmsg("UPDATE on distributed key column not allowed on relation with update triggers")));
}
relation_close(relation, NoLock);
}
/*
* Add a suitable Motion Path so that the input tuples from 'subpath' are
* distributed correctly for insertion into target table.
......
......@@ -308,8 +308,14 @@ ReportSrehResults(CdbSreh *cdbsreh, uint64 total_rejected)
}
}
static void
sendnumrows_internal(int64 numrejected, int64 numcompleted)
/*
* SendNumRows
*
* Using this function the QE sends back to the client QD the number
* of rows that were rejected and completed in this last data load
*/
void
SendNumRows(int64 numrejected, int64 numcompleted)
{
StringInfoData buf;
......@@ -324,30 +330,6 @@ sendnumrows_internal(int64 numrejected, int64 numcompleted)
pq_endmessage(&buf);
}
/*
* SendNumRowsRejected
*
* Using this function the QE sends back to the client QD the number
* of rows that were rejected in this last data load in SREH mode.
*/
void
SendNumRowsRejected(int64 numrejected)
{
sendnumrows_internal(numrejected, 0);
}
/*
* SendNumRows
*
* Using this function the QE sends back to the client QD the number
* of rows that were rejected and completed in this last data load
*/
void
SendNumRows(int64 numrejected, int64 numcompleted)
{
sendnumrows_internal(numrejected, numcompleted);
}
/* Identify the reject limit type */
static RejectLimitCode
GetRejectLimitCode(CdbSreh *cdbsreh)
......
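With SendNumRowsRejected() removed and the sendnumrows_internal() indirection folded into SendNumRows(), the old single-argument call maps onto the surviving function one-for-one. A hypothetical call site (the variable name is illustrative, not from this diff) would change like this:

    /* before: SendNumRowsRejected(numrejected); */
    SendNumRows(numrejected, 0);   /* rejected count, with an explicit zero for completed rows */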
......@@ -351,32 +351,6 @@ VarBlockMakerFinish(
return bufferLen;
}
/*
* Reset the VarBlock maker so it can make another one
* using the same inputs as given to VarBlockMakerInit.
*/
void
VarBlockMakerReset(
VarBlockMaker *varBlockMaker)
{
uint8 *buffer;
Assert(varBlockMaker != NULL);
buffer = (uint8 *) varBlockMaker->header;
memset(buffer, 0, VARBLOCK_HEADER_LEN);
VarBlockSet_version(varBlockMaker->header, InitialVersion);
VarBlockSet_offsetsAreSmall(varBlockMaker->header, true);
varBlockMaker->currentItemLenSum = 0;
varBlockMaker->nextItemPtr = &buffer[VARBLOCK_HEADER_LEN];
varBlockMaker->currentItemCount = 0;
varBlockMaker->maxItemCount = varBlockMaker->tempScratchSpaceLen / 2;
}
static VarBlockByteOffset
VarBlockGetOffset(
VarBlockHeader *header,
......@@ -651,62 +625,6 @@ VarBlockIsValid(
return VarBlockCheckOk;
}
/*
* Given a pointer to a VarBlock with at least VARBLOCK_HEADER_LEN bytes
* present, return the length of the whole block.
*/
VarBlockByteLen
VarBlockLenFromHeader(
uint8 *buffer,
VarBlockByteLen peekLen)
{
VarBlockHeader *header;
VarBlockByteLen itemLenSum;
VarBlockByteOffset offsetToOffsetArray;
int multiplier;
VarBlockByteLen bufferLen;
int headerLen = VARBLOCK_HEADER_LEN;
VarBlockByteLen offsetArrayLen;
Assert(buffer != NULL);
if (peekLen < headerLen)
{
fprintf(stderr, "bufferLen %d minimum %d",
peekLen, headerLen);
exit(1);
}
Assert(peekLen >= VARBLOCK_HEADER_LEN);
header = (VarBlockHeader *) buffer;
itemLenSum = VarBlockGet_itemLenSum(header);
/*
* Start offsetArrays on even boundary.
*/
offsetToOffsetArray = VARBLOCK_HEADER_LEN +
((itemLenSum + 1) / 2) * 2;
if (VarBlockGet_offsetsAreSmall(header))
{
multiplier = 2;
}
else
{
multiplier = VARBLOCK_BYTE_OFFSET_24_LEN;
}
/*
* Round-up to even length.
*/
offsetArrayLen = VarBlockGet_itemCount(header) * multiplier;
offsetArrayLen = ((offsetArrayLen + 1) / 2) * 2;
bufferLen = offsetToOffsetArray + offsetArrayLen;
return bufferLen;
}
/*
* Initialize the VarBlock reader.
*/
......
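The removed VarBlockLenFromHeader() derived the full block length from header fields alone; collapsing the deleted arithmetic into a single expression (names follow the code above):

    /* multiplier is 2 for small offsets, VARBLOCK_BYTE_OFFSET_24_LEN otherwise */
    bufferLen = VARBLOCK_HEADER_LEN
        + 2 * ((itemLenSum + 1) / 2)                  /* item data, rounded up to an even length */
        + 2 * ((itemCount * multiplier + 1) / 2);     /* offset array, likewise rounded up */

Here itemLenSum and itemCount stand for the values read via VarBlockGet_itemLenSum() and VarBlockGet_itemCount().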
......@@ -54,8 +54,6 @@
*/
#define MIN_RESOURCEQUEUE_MEMORY_LIMIT_KB (10 * 1024L)
static char *GetResqueueCapability(Oid queueOid, int capabilityIndex);
/* MPP-6923:
* GetResourceTypeByName: find the named resource in pg_resourcetype
*
......@@ -1595,93 +1593,3 @@ get_resqueue_oid(const char *queuename, bool missing_ok)
return oid;
}
/*
* Given a queue id, return its name
*/
char *
GetResqueueName(Oid resqueueOid)
{
Relation rel;
ScanKeyData scankey;
SysScanDesc sscan;
HeapTuple tuple;
char *result;
if (resqueueOid == InvalidOid)
return pstrdup("Unknown");
/* SELECT rsqname FROM pg_resqueue WHERE oid = :1 */
rel = heap_open(ResQueueRelationId, AccessShareLock);
ScanKeyInit(&scankey, ObjectIdAttributeNumber,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(resqueueOid));
sscan = systable_beginscan(rel, ResQueueOidIndexId, true,
NULL, 1, &scankey);
tuple = systable_getnext(sscan);
/* If we cannot find a resource queue id for any reason */
if (!tuple)
result = pstrdup("Unknown");
else
{
FormData_pg_resqueue *rqform = (FormData_pg_resqueue *) GETSTRUCT(tuple);
result = pstrdup(NameStr(rqform->rsqname));
}
systable_endscan(sscan);
heap_close(rel, AccessShareLock);
return result;
}
/**
* Given a resource queue id, get its priority in text form
*/
char *GetResqueuePriority(Oid queueId)
{
if (queueId == InvalidOid)
return pstrdup("Unknown");
else
return GetResqueueCapability(queueId, PG_RESRCTYPE_PRIORITY);
}
/**
* Given a queueid and a capability index, return the capability value as a string.
* Returns NULL if entry is not found.
* Input:
* queueOid - oid of resource queue
* capabilityIndex - see pg_resqueue.h for values (e.g. PG_RESRCTYPE_PRIORITY)
*/
static char *GetResqueueCapability(Oid queueOid, int capabilityIndex)
{
/* Update this assert if we add more capabilities */
Assert(capabilityIndex <= PG_RESRCTYPE_MEMORY_LIMIT);
Assert(queueOid != InvalidOid);
ListCell *le = NULL;
char *result = NULL;
List *capabilitiesList = GetResqueueCapabilityEntry(queueOid); /* This is a list of lists */
foreach(le, capabilitiesList)
{
Value *key = NULL;
List *entry = (List *) lfirst(le);
Assert(entry);
key = (Value *) linitial(entry);
Assert(IsA(key,Integer)); /* This is resource type id */
if (intVal(key) == capabilityIndex)
{
Value *val = lsecond(entry);
Assert(IsA(val,String));
result = pstrdup(strVal(val));
}
}
list_free(capabilitiesList);
return result;
}
......@@ -628,22 +628,6 @@ GetResGroupCapabilities(Relation rel, Oid groupId, ResGroupCaps *resgroupCaps)
}
}
/*
* GetResGroupMemAuditorForId -- Return the resource group memory auditor
* for a groupId
*/
int32
GetResGroupMemAuditorForId(Oid groupId, LOCKMODE lockmode)
{
ResGroupCaps caps;
Relation pg_resgroupcapability_rel = heap_open(
ResGroupCapabilityRelationId, lockmode);
GetResGroupCapabilities(pg_resgroupcapability_rel, groupId, &caps);
heap_close(pg_resgroupcapability_rel, lockmode);
return caps.memAuditor;
}
/*
* Get resource group id for a role in pg_authid.
*
......
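GetResGroupMemAuditorForId() was only a thin wrapper around GetResGroupCapabilities(); should a caller ever need the value again, the equivalent inline sequence (a sketch mirroring the deleted body, with the lock mode left to the caller) is:

    ResGroupCaps caps;
    Relation rel = heap_open(ResGroupCapabilityRelationId, lockmode);

    GetResGroupCapabilities(rel, groupId, &caps);   /* fills caps from pg_resgroupcapability */
    heap_close(rel, lockmode);
    /* caps.memAuditor holds the group's memory auditor */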
......@@ -967,131 +967,6 @@ cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
path->total_cost = startup_cost + run_cost;
}
/*
* cost_bitmap_appendonly_scan
*
* NOTE: This is a copy of cost_bitmap_heap_scan.
*/
void
cost_bitmap_appendonly_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
ParamPathInfo *param_info,
Path *bitmapqual, double loop_count)
{
Cost startup_cost = 0;
Cost run_cost = 0;
Cost indexTotalCost;
Selectivity indexSelectivity;
QualCost qpqual_cost;
Cost cpu_per_tuple;
Cost cost_per_page;
double tuples_fetched;
double pages_fetched;
double spc_seq_page_cost,
spc_random_page_cost;
double T;
/* Should only be applied to base relations */
Assert(IsA(baserel, RelOptInfo));
Assert(baserel->relid > 0);
Assert(baserel->rtekind == RTE_RELATION);
/* Mark the path with the correct row estimate */
if (param_info)
path->rows = param_info->ppi_rows;
else
path->rows = baserel->rows;
if (!enable_bitmapscan)
startup_cost += disable_cost;
/*
* Fetch total cost of obtaining the bitmap, as well as its total
* selectivity.
*/
cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
startup_cost += indexTotalCost;
/* Fetch estimated page costs for tablespace containing table. */
get_tablespace_page_costs(baserel->reltablespace,
&spc_random_page_cost,
&spc_seq_page_cost);
/*
* Estimate number of main-table pages fetched.
*/
tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
if (loop_count > 1)
{
/*
* For repeated bitmap scans, scale up the number of tuples fetched in
* the Mackert and Lohman formula by the number of scans, so that we
* estimate the number of pages fetched by all the scans. Then
* pro-rate for one scan.
*/
pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
baserel->pages,
get_indexpath_pages(bitmapqual),
root);
pages_fetched /= loop_count;
}
else
{
/*
* For a single scan, the number of heap pages that need to be fetched
* is the same as the Mackert and Lohman formula for the case T <= b
* (ie, no re-reads needed).
*/
pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
}
if (pages_fetched >= T)
pages_fetched = T;
else
pages_fetched = ceil(pages_fetched);
/*
* For small numbers of pages we should charge spc_random_page_cost
* apiece, while if nearly all the table's pages are being read, it's more
* appropriate to charge spc_seq_page_cost apiece. The effect is
* nonlinear, too. For lack of a better idea, interpolate like this to
* determine the cost per page.
*/
if (pages_fetched >= 2.0)
cost_per_page = spc_random_page_cost -
(spc_random_page_cost - spc_seq_page_cost)
* sqrt(pages_fetched / T);
else
cost_per_page = spc_random_page_cost;
run_cost += pages_fetched * cost_per_page;
/*
* Estimate CPU costs per tuple.
*
* Often the indexquals don't need to be rechecked at each tuple ... but
* not always, especially not if there are enough tuples involved that the
* bitmaps become lossy. For the moment, just assume they will be
* rechecked always. This means we charge the full freight for all the
* scan clauses.
*/
get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
startup_cost += qpqual_cost.startup;
cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
run_cost += cpu_per_tuple * tuples_fetched;
/* tlist eval costs are paid per output row, not per tuple scanned */
startup_cost += path->pathtarget->cost.startup;
run_cost += path->pathtarget->cost.per_tuple * path->rows;
path->startup_cost = startup_cost;
path->total_cost = startup_cost + run_cost;
}
/*
* cost_bitmap_tree_node
* Extract cost and selectivity from a bitmap tree node (index/and/or)
......
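cost_bitmap_appendonly_scan() was documented as a copy of cost_bitmap_heap_scan(), so the cost model itself is unaffected by the deletion. For the single-scan case the removed code boils down to the Mackert-Lohman page estimate plus the per-page cost interpolation (symbols as in the code above):

    /* single scan, no re-reads: cap at T pages and round up */
    pages_fetched = Min(T, ceil((2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched)));

    /* when at least two pages are fetched, interpolate between random and
     * sequential page cost; otherwise charge plain spc_random_page_cost */
    cost_per_page = spc_random_page_cost
        - (spc_random_page_cost - spc_seq_page_cost) * sqrt(pages_fetched / T);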
......@@ -516,26 +516,6 @@ add_restrictinfos(PlannerInfo *root, DynamicScanInfo *dsinfo, Bitmapset *childre
}
}
RestrictInfo *
make_mergeclause(Node *outer, Node *inner)
{
OpExpr *opxpr;
Expr *xpr;
RestrictInfo *rinfo;
opxpr = (OpExpr *) make_op(NULL, list_make1(makeString("=")),
outer,
inner, -1);
opxpr->xpr.type = T_DistinctExpr;
xpr = make_notclause((Expr *) opxpr);
rinfo = make_restrictinfo(xpr, false, false, false, NULL, NULL, NULL);
rinfo->mergeopfamilies = get_mergejoin_opfamilies(opxpr->opno);
return rinfo;
}
/*
* Does the given expression correspond to a var on partitioned relation.
* This function ignores relabeling wrappers
......
......@@ -5609,77 +5609,6 @@ bool is_builtin_true_equality_between_same_type(int opno)
}
}
/**
* Returns true if the equality operator with the given opno
* values is an equality operator, with same type on both sides
* (unlike int24 equality) AND the type being compare is greenplum hashable
*
*
* Note that this function is conservative with regard to when it returns true:
* it is okay to have some greenplum hashtable types that don't have entries here
* (this function may return false even if this is a comparison between
* a greenplum hashable type and itself
*
* Note also that i think it might be possible for this to return true even
* if the operands of the operator are not themselves greenplum hashable,
* because of type conversion or something. I'm not 100% sure on that.
*/
bool
is_builtin_greenplum_hashable_equality_between_same_type(int opno)
{
switch(opno)
{
case BitEqualOperator:
case BooleanEqualOperator:
case BPCharEqualOperator:
case CashEqualOperator:
case CharEqualOperator:
case DateEqualOperator:
case Float4EqualOperator:
case Float8EqualOperator:
case Int2EqualOperator:
case Int4EqualOperator:
case Int8EqualOperator:
case IntervalEqualOperator:
case NameEqualOperator:
case NumericEqualOperator:
case OidEqualOperator:
case RelTimeEqualOperator:
case TextEqualOperator:
case TIDEqualOperator:
case TimeEqualOperator:
case TimestampEqualOperator:
case TimestampTZEqualOperator:
case TimeTZEqualOperator:
/* these new ones were added to list for MPP-7858 */
case AbsTimeEqualOperator:
case ByteaEqualOperator:
case InetEqualOperator: /* for inet and cidr */
case MacAddrEqualOperator:
case TIntervalEqualOperator:
case VarbitEqualOperator:
return true;
/*
these types are greenplum hashable but haven't checked the semantics of these types function
case ACLITEMOID:
case ANYARRAYOID:
case INT2VECTOROID:
case OIDVECTOROID:
case REGPROCOID:
case REGPROCEDUREOID:
case REGOPEROID:
case REGOPERATOROID:
case REGCLASSOID:
case REGTYPEOID:
*/
default:
return false;
}
}
/**
* Structs and Methods to support searching of matching subexpressions.
*/
......
......@@ -6,14 +6,6 @@
#include "cmockery.h"
int idle_session_timeout_action_calls = 0;
void
idle_session_timeout_action_spy()
{
idle_session_timeout_action_calls++;
}
int
returns1000_stub(void)
{
......
......@@ -519,16 +519,6 @@ gp_hll_add_hash_dense(GpHLLCounter hloglog, uint64_t hash)
return hloglog;
}
/* Just reset the counter (set all the counters to 0). We do this by
* zeroing the data array */
void
gp_hll_reset_internal(GpHLLCounter hloglog)
{
memset(hloglog->data, 0, VARSIZE_ANY(hloglog) - sizeof(GpHLLData) );
}
/* Compress header function */
GpHLLCounter
gp_hll_compress(GpHLLCounter hloglog)
......
......@@ -1234,39 +1234,6 @@ bool ntuplestore_acc_current_tupleslot(NTupleStoreAccessor *tsa, TupleTableSlot
return true;
}
bool ntuplestore_acc_current_data(NTupleStoreAccessor *tsa, void **data, int *len)
{
bool fOK = ntuplestore_acc_current_data_internal(tsa, (void **) data, len);
if(!fOK)
{
return false;
}
if(*len < 0)
{
NTupleStoreLobRef *plobref = (NTupleStoreLobRef *) (*data);
Assert(*len == -(int)sizeof(NTupleStoreLobRef));
if (tsa->tmp_len < plobref->size)
{
if (tsa->tmp_lob)
pfree(tsa->tmp_lob);
tsa->tmp_lob = MemoryContextAlloc(tsa->store->mcxt, plobref->size);
tsa->tmp_len = plobref->size;
}
*data = tsa->tmp_lob;
*len = ntuplestore_get_lob(tsa->store, *data, plobref);
Assert(*len == plobref->size);
return true;
}
return true;
}
bool ntuplestore_acc_tell(NTupleStoreAccessor *tsa, NTupleStorePos *pos)
{
AssertImply(tsa->pos.blockn==-1, tsa->pos.slotn==-1);
......
......@@ -150,7 +150,6 @@ extern int64 GetAOCSTotalBytes(
extern FileSegTotals *GetAOCSSSegFilesTotals(Relation parentrel,
Snapshot appendOnlyMetaDataSnapshot);
extern AOCSFileSegInfo *NewAOCSFileSegInfo(int32 segno, int32 nvp);
extern void InsertInitialAOCSFileSegInfo(Relation prel, int32 segno, int32 nvp);
extern void UpdateAOCSFileSegInfo(struct AOCSInsertDescData *desc);
extern void AOCSFileSegInfoAddVpe(
......
......@@ -118,8 +118,6 @@ typedef struct FileSegTotals
* values */
} FileSegTotals;
extern FileSegInfo *NewFileSegInfo(int segno);
extern void InsertInitialSegnoEntry(Relation parentrel, int segno);
extern void ValidateAppendonlySegmentDataBeforeStorage(int segno);
......
......@@ -59,6 +59,4 @@ cdbpath_motion_for_join(PlannerInfo *root,
extern bool cdbpath_contains_wts(Path *path);
extern void failIfUpdateTriggers(Oid relid);
#endif /* CDBPATH_H */
......@@ -88,7 +88,6 @@ extern void destroyCdbSreh(CdbSreh *cdbsreh);
extern void HandleSingleRowError(CdbSreh *cdbsreh);
extern void ReportSrehResults(CdbSreh *cdbsreh, uint64 total_rejected);
extern void SendNumRows(int64 numrejected, int64 numcompleted);
extern void SendNumRowsRejected(int64 numrejected);
extern void ErrorIfRejectLimitReached(CdbSreh *cdbsreh);
extern bool ExceedSegmentRejectHardLimit(CdbSreh *cdbsreh);
extern bool IsRejectLimitReached(CdbSreh *cdbsreh);
......
......@@ -308,13 +308,6 @@ extern int VarBlockMakerItemCount(
extern VarBlockByteLen VarBlockMakerFinish(
VarBlockMaker *varBlockMaker);
/*
* Reset the VarBlock maker so it can make another one
* using the same inputs as given to VarBlockMakerInit.
*/
extern void VarBlockMakerReset(
VarBlockMaker *varBlockMaker);
// -----------------------------------------------------------------------------
/*
......@@ -340,14 +333,6 @@ extern VarBlockCheckError VarBlockIsValid(
*/
char *VarBlockGetCheckErrorStr(void);
/*
* Given a pointer to a VarBlock with at least VARBLOCK_HEADER_LEN bytes
* present, return the length of the whole block.
*/
VarBlockByteLen VarBlockLenFromHeader(
uint8 *buffer,
VarBlockByteLen peekLen);
// -----------------------------------------------------------------------------
/*
......
......@@ -23,8 +23,6 @@ GetResqueueCapabilityEntry(Oid queueid);
extern void CreateQueue(CreateQueueStmt *stmt);
extern void AlterQueue(AlterQueueStmt *stmt);
extern void DropQueue(DropQueueStmt *stmt);
extern char *GetResqueueName(Oid resqueueOid);
extern char *GetResqueuePriority(Oid queueId);
extern Oid get_resqueue_oid(const char *queuename, bool missing_ok);
#endif /* QUEUE_H */
......@@ -40,6 +40,4 @@ extern void GetResGroupCapabilities(Relation rel,
ResGroupCaps *resgroupCaps);
extern void ResGroupCheckForRole(Oid groupId);
extern int32 GetResGroupMemAuditorForId(Oid groupId, LOCKMODE lockmode);
#endif /* RESGROUPCMDS_H */
......@@ -105,7 +105,6 @@ extern Expr *evaluate_expr(Expr *expr, Oid result_type, int32 result_typmod,
Oid result_collation);
extern bool is_builtin_true_equality_between_same_type(int opno);
extern bool is_builtin_greenplum_hashable_equality_between_same_type(int opno);
extern bool subexpression_match(Expr *expr1, Expr *expr2);
......
......@@ -101,12 +101,6 @@ extern void cost_index(IndexPath *path, PlannerInfo *root,
extern void cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
ParamPathInfo *param_info,
Path *bitmapqual, double loop_count);
/* GPDB_92_MERGE_FIXME: Suspect we need to add ParamPathInfo for some scans below. */
extern void cost_bitmap_appendonly_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
ParamPathInfo *param_info,
Path *bitmapqual, double loop_count);
extern void cost_bitmap_table_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
Path *bitmapqual, RelOptInfo *outer_rel);
extern void cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root);
extern void cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root);
extern void cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec);
......
......@@ -25,6 +25,4 @@ extern bool inject_partition_selectors_for_join(PlannerInfo *root,
extern Plan *create_partition_selector_plan(PlannerInfo *root, PartitionSelectorPath *pspath);
extern RestrictInfo *make_mergeclause(Node *outer, Node *inner);
#endif /* PLANPARTITION_H */
......@@ -189,9 +189,6 @@ GpHLLCounter gp_hll_add_element(GpHLLCounter hloglog, const char * element, int
/* get an estimate from the hyperloglog counter */
double gp_hll_estimate(GpHLLCounter hloglog);
/* reset a counter */
void gp_hll_reset_internal(GpHLLCounter hloglog);
/* data compression/decompression */
GpHLLCounter gp_hll_compress(GpHLLCounter hloglog);
GpHLLCounter gp_hll_decompress(GpHLLCounter hloglog);
......
......@@ -51,7 +51,6 @@ extern bool ntuplestore_acc_advance(NTupleStoreAccessor *tsa, int n);
* to make sure trim does not trim too far ahead
*/
extern bool ntuplestore_acc_current_tupleslot(NTupleStoreAccessor *tsa, TupleTableSlot *slot);
extern bool ntuplestore_acc_current_data(NTupleStoreAccessor *tsa, void **data, int *len);
/* Tell/seek position of accessor. */
......