Index: third_party/sqlite/src/src/rowset.c |
diff --git a/third_party/sqlite/src/src/rowset.c b/third_party/sqlite/src/src/rowset.c |
index ff5593892ab3f5af79ff1d5a768c6529a82fadf9..aa81607b9fdc1864c5d542973fb2b632055995ab 100644 |
--- a/third_party/sqlite/src/src/rowset.c |
+++ b/third_party/sqlite/src/src/rowset.c |
@@ -57,8 +57,9 @@ |
** of the first SMALLEST is O(NlogN). Second and subsequent SMALLEST |
** primitives are constant time. The cost of DESTROY is O(N). |
** |
-** There is an added cost of O(N) when switching between TEST and |
-** SMALLEST primitives. |
+** TEST and SMALLEST may not be used by the same RowSet. This used to |
+** be possible, but the feature was not used, so it was removed in order |
+** to simplify the code. |
*/ |
#include "sqliteInt.h" |
@@ -179,9 +180,11 @@ void sqlite3RowSetClear(RowSet *p){ |
*/ |
static struct RowSetEntry *rowSetEntryAlloc(RowSet *p){ |
assert( p!=0 ); |
- if( p->nFresh==0 ){ |
+ if( p->nFresh==0 ){ /*OPTIMIZATION-IF-FALSE*/ |
+ /* We could allocate a fresh RowSetEntry each time one is needed, but it |
+ ** is more efficient to pull a preallocated entry from the pool */ |
struct RowSetChunk *pNew; |
- pNew = sqlite3DbMallocRaw(p->db, sizeof(*pNew)); |
+ pNew = sqlite3DbMallocRawNN(p->db, sizeof(*pNew)); |
if( pNew==0 ){ |
return 0; |
} |
@@ -213,7 +216,9 @@ void sqlite3RowSetInsert(RowSet *p, i64 rowid){ |
pEntry->pRight = 0; |
pLast = p->pLast; |
if( pLast ){ |
- if( (p->rsFlags & ROWSET_SORTED)!=0 && rowid<=pLast->v ){ |
+ if( rowid<=pLast->v ){ /*OPTIMIZATION-IF-FALSE*/ |
+ /* Avoid unnecessary sorts by preserving the ROWSET_SORTED flags |
+ ** where possible */ |
p->rsFlags &= ~ROWSET_SORTED; |
} |
pLast->pRight = pEntry; |
@@ -237,28 +242,26 @@ static struct RowSetEntry *rowSetEntryMerge( |
struct RowSetEntry *pTail; |
pTail = &head; |
- while( pA && pB ){ |
+ assert( pA!=0 && pB!=0 ); |
+ for(;;){ |
assert( pA->pRight==0 || pA->v<=pA->pRight->v ); |
assert( pB->pRight==0 || pB->v<=pB->pRight->v ); |
- if( pA->v<pB->v ){ |
- pTail->pRight = pA; |
+ if( pA->v<=pB->v ){ |
+ if( pA->v<pB->v ) pTail = pTail->pRight = pA; |
pA = pA->pRight; |
- pTail = pTail->pRight; |
- }else if( pB->v<pA->v ){ |
- pTail->pRight = pB; |
- pB = pB->pRight; |
- pTail = pTail->pRight; |
+ if( pA==0 ){ |
+ pTail->pRight = pB; |
+ break; |
+ } |
}else{ |
- pA = pA->pRight; |
+ pTail = pTail->pRight = pB; |
+ pB = pB->pRight; |
+ if( pB==0 ){ |
+ pTail->pRight = pA; |
+ break; |
+ } |
} |
} |
- if( pA ){ |
- assert( pA->pRight==0 || pA->v<=pA->pRight->v ); |
- pTail->pRight = pA; |
- }else{ |
- assert( pB==0 || pB->pRight==0 || pB->v<=pB->pRight->v ); |
- pTail->pRight = pB; |
- } |
return head.pRight; |
} |
@@ -281,9 +284,10 @@ static struct RowSetEntry *rowSetEntrySort(struct RowSetEntry *pIn){ |
aBucket[i] = pIn; |
pIn = pNext; |
} |
- pIn = 0; |
- for(i=0; i<sizeof(aBucket)/sizeof(aBucket[0]); i++){ |
- pIn = rowSetEntryMerge(pIn, aBucket[i]); |
+ pIn = aBucket[0]; |
+ for(i=1; i<sizeof(aBucket)/sizeof(aBucket[0]); i++){ |
+ if( aBucket[i]==0 ) continue; |
+ pIn = pIn ? rowSetEntryMerge(pIn, aBucket[i]) : aBucket[i]; |
} |
return pIn; |
} |
@@ -335,23 +339,29 @@ static struct RowSetEntry *rowSetNDeepTree( |
){ |
struct RowSetEntry *p; /* Root of the new tree */ |
struct RowSetEntry *pLeft; /* Left subtree */ |
- if( *ppList==0 ){ |
- return 0; |
+ if( *ppList==0 ){ /*OPTIMIZATION-IF-TRUE*/ |
+ /* Prevent unnecessary deep recursion when we run out of entries */ |
+ return 0; |
} |
- if( iDepth==1 ){ |
+ if( iDepth>1 ){ /*OPTIMIZATION-IF-TRUE*/ |
+ /* This branch causes a *balanced* tree to be generated. A valid tree |
+ ** is still generated without this branch, but the tree is wildly |
+ ** unbalanced and inefficient. */ |
+ pLeft = rowSetNDeepTree(ppList, iDepth-1); |
+ p = *ppList; |
+ if( p==0 ){ /*OPTIMIZATION-IF-FALSE*/ |
+ /* It is safe to always return here, but the resulting tree |
+ ** would be unbalanced */ |
+ return pLeft; |
+ } |
+ p->pLeft = pLeft; |
+ *ppList = p->pRight; |
+ p->pRight = rowSetNDeepTree(ppList, iDepth-1); |
+ }else{ |
p = *ppList; |
*ppList = p->pRight; |
p->pLeft = p->pRight = 0; |
- return p; |
- } |
- pLeft = rowSetNDeepTree(ppList, iDepth-1); |
- p = *ppList; |
- if( p==0 ){ |
- return pLeft; |
} |
- p->pLeft = pLeft; |
- *ppList = p->pRight; |
- p->pRight = rowSetNDeepTree(ppList, iDepth-1); |
return p; |
} |
@@ -379,58 +389,36 @@ static struct RowSetEntry *rowSetListToTree(struct RowSetEntry *pList){ |
} |
/* |
-** Take all the entries on p->pEntry and on the trees in p->pForest and |
-** sort them all together into one big ordered list on p->pEntry. |
-** |
-** This routine should only be called once in the life of a RowSet. |
-*/ |
-static void rowSetToList(RowSet *p){ |
- |
- /* This routine is called only once */ |
- assert( p!=0 && (p->rsFlags & ROWSET_NEXT)==0 ); |
- |
- if( (p->rsFlags & ROWSET_SORTED)==0 ){ |
- p->pEntry = rowSetEntrySort(p->pEntry); |
- } |
- |
- /* While this module could theoretically support it, sqlite3RowSetNext() |
- ** is never called after sqlite3RowSetText() for the same RowSet. So |
- ** there is never a forest to deal with. Should this change, simply |
- ** remove the assert() and the #if 0. */ |
- assert( p->pForest==0 ); |
-#if 0 |
- while( p->pForest ){ |
- struct RowSetEntry *pTree = p->pForest->pLeft; |
- if( pTree ){ |
- struct RowSetEntry *pHead, *pTail; |
- rowSetTreeToList(pTree, &pHead, &pTail); |
- p->pEntry = rowSetEntryMerge(p->pEntry, pHead); |
- } |
- p->pForest = p->pForest->pRight; |
- } |
-#endif |
- p->rsFlags |= ROWSET_NEXT; /* Verify this routine is never called again */ |
-} |
- |
-/* |
** Extract the smallest element from the RowSet. |
** Write the element into *pRowid. Return 1 on success. Return |
** 0 if the RowSet is already empty. |
** |
** After this routine has been called, the sqlite3RowSetInsert() |
-** routine may not be called again. |
+** routine may not be called again. |
+** |
+** This routine may not be called after sqlite3RowSetTest() has |
+** been used. Older versions of RowSet allowed that, but as the |
+** capability was not used by the code generator, it was removed |
+** for code economy. |
*/ |
int sqlite3RowSetNext(RowSet *p, i64 *pRowid){ |
assert( p!=0 ); |
+  assert( p->pForest==0 );  /* Cannot be used with sqlite3RowSetTest() */ |
/* Merge the forest into a single sorted list on first call */ |
- if( (p->rsFlags & ROWSET_NEXT)==0 ) rowSetToList(p); |
+ if( (p->rsFlags & ROWSET_NEXT)==0 ){ /*OPTIMIZATION-IF-FALSE*/ |
+ if( (p->rsFlags & ROWSET_SORTED)==0 ){ /*OPTIMIZATION-IF-FALSE*/ |
+ p->pEntry = rowSetEntrySort(p->pEntry); |
+ } |
+ p->rsFlags |= ROWSET_SORTED|ROWSET_NEXT; |
+ } |
/* Return the next entry on the list */ |
if( p->pEntry ){ |
*pRowid = p->pEntry->v; |
p->pEntry = p->pEntry->pRight; |
- if( p->pEntry==0 ){ |
+ if( p->pEntry==0 ){ /*OPTIMIZATION-IF-TRUE*/ |
+ /* Free memory immediately, rather than waiting on sqlite3_finalize() */ |
sqlite3RowSetClear(p); |
} |
return 1; |
@@ -453,13 +441,15 @@ int sqlite3RowSetTest(RowSet *pRowSet, int iBatch, sqlite3_int64 iRowid){ |
/* This routine is never called after sqlite3RowSetNext() */ |
assert( pRowSet!=0 && (pRowSet->rsFlags & ROWSET_NEXT)==0 ); |
- /* Sort entries into the forest on the first test of a new batch |
+ /* Sort entries into the forest on the first test of a new batch. |
+ ** To save unnecessary work, only do this when the batch number changes. |
*/ |
- if( iBatch!=pRowSet->iBatch ){ |
+ if( iBatch!=pRowSet->iBatch ){ /*OPTIMIZATION-IF-FALSE*/ |
p = pRowSet->pEntry; |
if( p ){ |
struct RowSetEntry **ppPrevTree = &pRowSet->pForest; |
- if( (pRowSet->rsFlags & ROWSET_SORTED)==0 ){ |
+ if( (pRowSet->rsFlags & ROWSET_SORTED)==0 ){ /*OPTIMIZATION-IF-FALSE*/ |
+      /* Only sort the current set of entries if they need it */ |
p = rowSetEntrySort(p); |
} |
for(pTree = pRowSet->pForest; pTree; pTree=pTree->pRight){ |