OLD | NEW |
1 /* | 1 /* |
2 ** 2008 December 3 | 2 ** 2008 December 3 |
3 ** | 3 ** |
4 ** The author disclaims copyright to this source code. In place of | 4 ** The author disclaims copyright to this source code. In place of |
5 ** a legal notice, here is a blessing: | 5 ** a legal notice, here is a blessing: |
6 ** | 6 ** |
7 ** May you do good and not evil. | 7 ** May you do good and not evil. |
8 ** May you find forgiveness for yourself and forgive others. | 8 ** May you find forgiveness for yourself and forgive others. |
9 ** May you share freely, never taking more than you give. | 9 ** May you share freely, never taking more than you give. |
10 ** | 10 ** |
(...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
50 ** No INSERTs may occur after a SMALLEST. An assertion will fail if | 50 ** No INSERTs may occur after a SMALLEST. An assertion will fail if |
51 ** that is attempted. | 51 ** that is attempted. |
52 ** | 52 ** |
53 ** The cost of an INSERT is roughly constant. (Sometimes new memory | 53 ** The cost of an INSERT is roughly constant. (Sometimes new memory |
54 ** has to be allocated on an INSERT.) The cost of a TEST with a new | 54 ** has to be allocated on an INSERT.) The cost of a TEST with a new |
55 ** batch number is O(NlogN) where N is the number of elements in the RowSet. | 55 ** batch number is O(NlogN) where N is the number of elements in the RowSet. |
56 ** The cost of a TEST using the same batch number is O(logN). The cost | 56 ** The cost of a TEST using the same batch number is O(logN). The cost |
57 ** of the first SMALLEST is O(NlogN). Second and subsequent SMALLEST | 57 ** of the first SMALLEST is O(NlogN). Second and subsequent SMALLEST |
58 ** primitives are constant time. The cost of DESTROY is O(N). | 58 ** primitives are constant time. The cost of DESTROY is O(N). |
59 ** | 59 ** |
60 ** There is an added cost of O(N) when switching between TEST and | 60 ** TEST and SMALLEST may not be used by the same RowSet. This used to |
61 ** SMALLEST primitives. | 61 ** be possible, but the feature was not used, so it was removed in order |
| 62 ** to simplify the code. |
62 */ | 63 */ |
63 #include "sqliteInt.h" | 64 #include "sqliteInt.h" |
64 | 65 |
65 | 66 |
66 /* | 67 /* |
67 ** Target size for allocation chunks. | 68 ** Target size for allocation chunks. |
68 */ | 69 */ |
69 #define ROWSET_ALLOCATION_SIZE 1024 | 70 #define ROWSET_ALLOCATION_SIZE 1024 |
70 | 71 |
71 /* | 72 /* |
(...skipping 100 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
172 /* | 173 /* |
173 ** Allocate a new RowSetEntry object that is associated with the | 174 ** Allocate a new RowSetEntry object that is associated with the |
174 ** given RowSet. Return a pointer to the new and completely uninitialized | 175 ** given RowSet. Return a pointer to the new and completely uninitialized |
175 ** object. | 176 ** object. |
176 ** | 177 ** |
177 ** In an OOM situation, the RowSet.db->mallocFailed flag is set and this | 178 ** In an OOM situation, the RowSet.db->mallocFailed flag is set and this |
178 ** routine returns NULL. | 179 ** routine returns NULL. |
179 */ | 180 */ |
180 static struct RowSetEntry *rowSetEntryAlloc(RowSet *p){ | 181 static struct RowSetEntry *rowSetEntryAlloc(RowSet *p){ |
181 assert( p!=0 ); | 182 assert( p!=0 ); |
182 if( p->nFresh==0 ){ | 183 if( p->nFresh==0 ){ /*OPTIMIZATION-IF-FALSE*/ |
| 184 /* We could allocate a fresh RowSetEntry each time one is needed, but it |
| 185 ** is more efficient to pull a preallocated entry from the pool */ |
183 struct RowSetChunk *pNew; | 186 struct RowSetChunk *pNew; |
184 pNew = sqlite3DbMallocRaw(p->db, sizeof(*pNew)); | 187 pNew = sqlite3DbMallocRawNN(p->db, sizeof(*pNew)); |
185 if( pNew==0 ){ | 188 if( pNew==0 ){ |
186 return 0; | 189 return 0; |
187 } | 190 } |
188 pNew->pNextChunk = p->pChunk; | 191 pNew->pNextChunk = p->pChunk; |
189 p->pChunk = pNew; | 192 p->pChunk = pNew; |
190 p->pFresh = pNew->aEntry; | 193 p->pFresh = pNew->aEntry; |
191 p->nFresh = ROWSET_ENTRY_PER_CHUNK; | 194 p->nFresh = ROWSET_ENTRY_PER_CHUNK; |
192 } | 195 } |
193 p->nFresh--; | 196 p->nFresh--; |
194 return p->pFresh++; | 197 return p->pFresh++; |
(...skipping 11 matching lines...) Expand all Loading... |
206 | 209 |
207 /* This routine is never called after sqlite3RowSetNext() */ | 210 /* This routine is never called after sqlite3RowSetNext() */ |
208 assert( p!=0 && (p->rsFlags & ROWSET_NEXT)==0 ); | 211 assert( p!=0 && (p->rsFlags & ROWSET_NEXT)==0 ); |
209 | 212 |
210 pEntry = rowSetEntryAlloc(p); | 213 pEntry = rowSetEntryAlloc(p); |
211 if( pEntry==0 ) return; | 214 if( pEntry==0 ) return; |
212 pEntry->v = rowid; | 215 pEntry->v = rowid; |
213 pEntry->pRight = 0; | 216 pEntry->pRight = 0; |
214 pLast = p->pLast; | 217 pLast = p->pLast; |
215 if( pLast ){ | 218 if( pLast ){ |
216 if( (p->rsFlags & ROWSET_SORTED)!=0 && rowid<=pLast->v ){ | 219 if( rowid<=pLast->v ){ /*OPTIMIZATION-IF-FALSE*/ |
| 220 /* Avoid unnecessary sorts by preserving the ROWSET_SORTED flags |
| 221 ** where possible */ |
217 p->rsFlags &= ~ROWSET_SORTED; | 222 p->rsFlags &= ~ROWSET_SORTED; |
218 } | 223 } |
219 pLast->pRight = pEntry; | 224 pLast->pRight = pEntry; |
220 }else{ | 225 }else{ |
221 p->pEntry = pEntry; | 226 p->pEntry = pEntry; |
222 } | 227 } |
223 p->pLast = pEntry; | 228 p->pLast = pEntry; |
224 } | 229 } |
225 | 230 |
226 /* | 231 /* |
227 ** Merge two lists of RowSetEntry objects. Remove duplicates. | 232 ** Merge two lists of RowSetEntry objects. Remove duplicates. |
228 ** | 233 ** |
229 ** The input lists are connected via pRight pointers and are | 234 ** The input lists are connected via pRight pointers and are |
230 ** assumed to each already be in sorted order. | 235 ** assumed to each already be in sorted order. |
231 */ | 236 */ |
232 static struct RowSetEntry *rowSetEntryMerge( | 237 static struct RowSetEntry *rowSetEntryMerge( |
233 struct RowSetEntry *pA, /* First sorted list to be merged */ | 238 struct RowSetEntry *pA, /* First sorted list to be merged */ |
234 struct RowSetEntry *pB /* Second sorted list to be merged */ | 239 struct RowSetEntry *pB /* Second sorted list to be merged */ |
235 ){ | 240 ){ |
236 struct RowSetEntry head; | 241 struct RowSetEntry head; |
237 struct RowSetEntry *pTail; | 242 struct RowSetEntry *pTail; |
238 | 243 |
239 pTail = &head; | 244 pTail = &head; |
240 while( pA && pB ){ | 245 assert( pA!=0 && pB!=0 ); |
| 246 for(;;){ |
241 assert( pA->pRight==0 || pA->v<=pA->pRight->v ); | 247 assert( pA->pRight==0 || pA->v<=pA->pRight->v ); |
242 assert( pB->pRight==0 || pB->v<=pB->pRight->v ); | 248 assert( pB->pRight==0 || pB->v<=pB->pRight->v ); |
243 if( pA->v<pB->v ){ | 249 if( pA->v<=pB->v ){ |
244 pTail->pRight = pA; | 250 if( pA->v<pB->v ) pTail = pTail->pRight = pA; |
245 pA = pA->pRight; | 251 pA = pA->pRight; |
246 pTail = pTail->pRight; | 252 if( pA==0 ){ |
247 }else if( pB->v<pA->v ){ | 253 pTail->pRight = pB; |
248 pTail->pRight = pB; | 254 break; |
| 255 } |
| 256 }else{ |
| 257 pTail = pTail->pRight = pB; |
249 pB = pB->pRight; | 258 pB = pB->pRight; |
250 pTail = pTail->pRight; | 259 if( pB==0 ){ |
251 }else{ | 260 pTail->pRight = pA; |
252 pA = pA->pRight; | 261 break; |
| 262 } |
253 } | 263 } |
254 } | 264 } |
255 if( pA ){ | |
256 assert( pA->pRight==0 || pA->v<=pA->pRight->v ); | |
257 pTail->pRight = pA; | |
258 }else{ | |
259 assert( pB==0 || pB->pRight==0 || pB->v<=pB->pRight->v ); | |
260 pTail->pRight = pB; | |
261 } | |
262 return head.pRight; | 265 return head.pRight; |
263 } | 266 } |
264 | 267 |
265 /* | 268 /* |
266 ** Sort all elements on the list of RowSetEntry objects into order of | 269 ** Sort all elements on the list of RowSetEntry objects into order of |
267 ** increasing v. | 270 ** increasing v. |
268 */ | 271 */ |
269 static struct RowSetEntry *rowSetEntrySort(struct RowSetEntry *pIn){ | 272 static struct RowSetEntry *rowSetEntrySort(struct RowSetEntry *pIn){ |
270 unsigned int i; | 273 unsigned int i; |
271 struct RowSetEntry *pNext, *aBucket[40]; | 274 struct RowSetEntry *pNext, *aBucket[40]; |
272 | 275 |
273 memset(aBucket, 0, sizeof(aBucket)); | 276 memset(aBucket, 0, sizeof(aBucket)); |
274 while( pIn ){ | 277 while( pIn ){ |
275 pNext = pIn->pRight; | 278 pNext = pIn->pRight; |
276 pIn->pRight = 0; | 279 pIn->pRight = 0; |
277 for(i=0; aBucket[i]; i++){ | 280 for(i=0; aBucket[i]; i++){ |
278 pIn = rowSetEntryMerge(aBucket[i], pIn); | 281 pIn = rowSetEntryMerge(aBucket[i], pIn); |
279 aBucket[i] = 0; | 282 aBucket[i] = 0; |
280 } | 283 } |
281 aBucket[i] = pIn; | 284 aBucket[i] = pIn; |
282 pIn = pNext; | 285 pIn = pNext; |
283 } | 286 } |
284 pIn = 0; | 287 pIn = aBucket[0]; |
285 for(i=0; i<sizeof(aBucket)/sizeof(aBucket[0]); i++){ | 288 for(i=1; i<sizeof(aBucket)/sizeof(aBucket[0]); i++){ |
286 pIn = rowSetEntryMerge(pIn, aBucket[i]); | 289 if( aBucket[i]==0 ) continue; |
| 290 pIn = pIn ? rowSetEntryMerge(pIn, aBucket[i]) : aBucket[i]; |
287 } | 291 } |
288 return pIn; | 292 return pIn; |
289 } | 293 } |
290 | 294 |
291 | 295 |
292 /* | 296 /* |
293 ** The input, pIn, is a binary tree (or subtree) of RowSetEntry objects. | 297 ** The input, pIn, is a binary tree (or subtree) of RowSetEntry objects. |
294 ** Convert this tree into a linked list connected by the pRight pointers | 298 ** Convert this tree into a linked list connected by the pRight pointers |
295 ** and return pointers to the first and last elements of the new list. | 299 ** and return pointers to the first and last elements of the new list. |
296 */ | 300 */ |
(...skipping 31 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
328 ** and leave *ppList set to NULL. | 332 ** and leave *ppList set to NULL. |
329 ** | 333 ** |
330 ** Return a pointer to the root of the constructed binary tree. | 334 ** Return a pointer to the root of the constructed binary tree. |
331 */ | 335 */ |
332 static struct RowSetEntry *rowSetNDeepTree( | 336 static struct RowSetEntry *rowSetNDeepTree( |
333 struct RowSetEntry **ppList, | 337 struct RowSetEntry **ppList, |
334 int iDepth | 338 int iDepth |
335 ){ | 339 ){ |
336 struct RowSetEntry *p; /* Root of the new tree */ | 340 struct RowSetEntry *p; /* Root of the new tree */ |
337 struct RowSetEntry *pLeft; /* Left subtree */ | 341 struct RowSetEntry *pLeft; /* Left subtree */ |
338 if( *ppList==0 ){ | 342 if( *ppList==0 ){ /*OPTIMIZATION-IF-TRUE*/ |
339 return 0; | 343 /* Prevent unnecessary deep recursion when we run out of entries */ |
| 344 return 0; |
340 } | 345 } |
341 if( iDepth==1 ){ | 346 if( iDepth>1 ){ /*OPTIMIZATION-IF-TRUE*/ |
| 347 /* This branch causes a *balanced* tree to be generated. A valid tree |
| 348 ** is still generated without this branch, but the tree is wildly |
| 349 ** unbalanced and inefficient. */ |
| 350 pLeft = rowSetNDeepTree(ppList, iDepth-1); |
| 351 p = *ppList; |
| 352 if( p==0 ){ /*OPTIMIZATION-IF-FALSE*/ |
| 353 /* It is safe to always return here, but the resulting tree |
| 354 ** would be unbalanced */ |
| 355 return pLeft; |
| 356 } |
| 357 p->pLeft = pLeft; |
| 358 *ppList = p->pRight; |
| 359 p->pRight = rowSetNDeepTree(ppList, iDepth-1); |
| 360 }else{ |
342 p = *ppList; | 361 p = *ppList; |
343 *ppList = p->pRight; | 362 *ppList = p->pRight; |
344 p->pLeft = p->pRight = 0; | 363 p->pLeft = p->pRight = 0; |
345 return p; | |
346 } | 364 } |
347 pLeft = rowSetNDeepTree(ppList, iDepth-1); | |
348 p = *ppList; | |
349 if( p==0 ){ | |
350 return pLeft; | |
351 } | |
352 p->pLeft = pLeft; | |
353 *ppList = p->pRight; | |
354 p->pRight = rowSetNDeepTree(ppList, iDepth-1); | |
355 return p; | 365 return p; |
356 } | 366 } |
357 | 367 |
358 /* | 368 /* |
359 ** Convert a sorted list of elements into a binary tree. Make the tree | 369 ** Convert a sorted list of elements into a binary tree. Make the tree |
360 ** as deep as it needs to be in order to contain the entire list. | 370 ** as deep as it needs to be in order to contain the entire list. |
361 */ | 371 */ |
362 static struct RowSetEntry *rowSetListToTree(struct RowSetEntry *pList){ | 372 static struct RowSetEntry *rowSetListToTree(struct RowSetEntry *pList){ |
363 int iDepth; /* Depth of the tree so far */ | 373 int iDepth; /* Depth of the tree so far */ |
364 struct RowSetEntry *p; /* Current tree root */ | 374 struct RowSetEntry *p; /* Current tree root */ |
365 struct RowSetEntry *pLeft; /* Left subtree */ | 375 struct RowSetEntry *pLeft; /* Left subtree */ |
366 | 376 |
367 assert( pList!=0 ); | 377 assert( pList!=0 ); |
368 p = pList; | 378 p = pList; |
369 pList = p->pRight; | 379 pList = p->pRight; |
370 p->pLeft = p->pRight = 0; | 380 p->pLeft = p->pRight = 0; |
371 for(iDepth=1; pList; iDepth++){ | 381 for(iDepth=1; pList; iDepth++){ |
372 pLeft = p; | 382 pLeft = p; |
373 p = pList; | 383 p = pList; |
374 pList = p->pRight; | 384 pList = p->pRight; |
375 p->pLeft = pLeft; | 385 p->pLeft = pLeft; |
376 p->pRight = rowSetNDeepTree(&pList, iDepth); | 386 p->pRight = rowSetNDeepTree(&pList, iDepth); |
377 } | 387 } |
378 return p; | 388 return p; |
379 } | 389 } |
380 | 390 |
381 /* | 391 /* |
382 ** Take all the entries on p->pEntry and on the trees in p->pForest and | |
383 ** sort them all together into one big ordered list on p->pEntry. | |
384 ** | |
385 ** This routine should only be called once in the life of a RowSet. | |
386 */ | |
387 static void rowSetToList(RowSet *p){ | |
388 | |
389 /* This routine is called only once */ | |
390 assert( p!=0 && (p->rsFlags & ROWSET_NEXT)==0 ); | |
391 | |
392 if( (p->rsFlags & ROWSET_SORTED)==0 ){ | |
393 p->pEntry = rowSetEntrySort(p->pEntry); | |
394 } | |
395 | |
396 /* While this module could theoretically support it, sqlite3RowSetNext() | |
397 ** is never called after sqlite3RowSetTest() for the same RowSet. So | 
398 ** there is never a forest to deal with. Should this change, simply | |
399 ** remove the assert() and the #if 0. */ | |
400 assert( p->pForest==0 ); | |
401 #if 0 | |
402 while( p->pForest ){ | |
403 struct RowSetEntry *pTree = p->pForest->pLeft; | |
404 if( pTree ){ | |
405 struct RowSetEntry *pHead, *pTail; | |
406 rowSetTreeToList(pTree, &pHead, &pTail); | |
407 p->pEntry = rowSetEntryMerge(p->pEntry, pHead); | |
408 } | |
409 p->pForest = p->pForest->pRight; | |
410 } | |
411 #endif | |
412 p->rsFlags |= ROWSET_NEXT; /* Verify this routine is never called again */ | |
413 } | |
414 | |
415 /* | |
416 ** Extract the smallest element from the RowSet. | 392 ** Extract the smallest element from the RowSet. |
417 ** Write the element into *pRowid. Return 1 on success. Return | 393 ** Write the element into *pRowid. Return 1 on success. Return |
418 ** 0 if the RowSet is already empty. | 394 ** 0 if the RowSet is already empty. |
419 ** | 395 ** |
420 ** After this routine has been called, the sqlite3RowSetInsert() | 396 ** After this routine has been called, the sqlite3RowSetInsert() |
421 ** routine may not be called again. | 397 ** routine may not be called again. |
| 398 ** |
| 399 ** This routine may not be called after sqlite3RowSetTest() has |
| 400 ** been used. Older versions of RowSet allowed that, but as the |
| 401 ** capability was not used by the code generator, it was removed |
| 402 ** for code economy. |
422 */ | 403 */ |
423 int sqlite3RowSetNext(RowSet *p, i64 *pRowid){ | 404 int sqlite3RowSetNext(RowSet *p, i64 *pRowid){ |
424 assert( p!=0 ); | 405 assert( p!=0 ); |
| 406 assert( p->pForest==0 ); /* Cannot be used with sqlite3RowSetTest() */ |
425 | 407 |
426 /* Merge the forest into a single sorted list on first call */ | 408 /* Merge the forest into a single sorted list on first call */ |
427 if( (p->rsFlags & ROWSET_NEXT)==0 ) rowSetToList(p); | 409 if( (p->rsFlags & ROWSET_NEXT)==0 ){ /*OPTIMIZATION-IF-FALSE*/ |
| 410 if( (p->rsFlags & ROWSET_SORTED)==0 ){ /*OPTIMIZATION-IF-FALSE*/ |
| 411 p->pEntry = rowSetEntrySort(p->pEntry); |
| 412 } |
| 413 p->rsFlags |= ROWSET_SORTED|ROWSET_NEXT; |
| 414 } |
428 | 415 |
429 /* Return the next entry on the list */ | 416 /* Return the next entry on the list */ |
430 if( p->pEntry ){ | 417 if( p->pEntry ){ |
431 *pRowid = p->pEntry->v; | 418 *pRowid = p->pEntry->v; |
432 p->pEntry = p->pEntry->pRight; | 419 p->pEntry = p->pEntry->pRight; |
433 if( p->pEntry==0 ){ | 420 if( p->pEntry==0 ){ /*OPTIMIZATION-IF-TRUE*/ |
| 421 /* Free memory immediately, rather than waiting on sqlite3_finalize() */ |
434 sqlite3RowSetClear(p); | 422 sqlite3RowSetClear(p); |
435 } | 423 } |
436 return 1; | 424 return 1; |
437 }else{ | 425 }else{ |
438 return 0; | 426 return 0; |
439 } | 427 } |
440 } | 428 } |
441 | 429 |
442 /* | 430 /* |
443 ** Check to see if element iRowid was inserted into the rowset as | 431 ** Check to see if element iRowid was inserted into the rowset as |
444 ** part of any insert batch prior to iBatch. Return 1 or 0. | 432 ** part of any insert batch prior to iBatch. Return 1 or 0. |
445 ** | 433 ** |
446 ** If this is the first test of a new batch and if there exist entries | 434 ** If this is the first test of a new batch and if there exist entries |
447 ** on pRowSet->pEntry, then sort those entries into the forest at | 435 ** on pRowSet->pEntry, then sort those entries into the forest at |
448 ** pRowSet->pForest so that they can be tested. | 436 ** pRowSet->pForest so that they can be tested. |
449 */ | 437 */ |
450 int sqlite3RowSetTest(RowSet *pRowSet, int iBatch, sqlite3_int64 iRowid){ | 438 int sqlite3RowSetTest(RowSet *pRowSet, int iBatch, sqlite3_int64 iRowid){ |
451 struct RowSetEntry *p, *pTree; | 439 struct RowSetEntry *p, *pTree; |
452 | 440 |
453 /* This routine is never called after sqlite3RowSetNext() */ | 441 /* This routine is never called after sqlite3RowSetNext() */ |
454 assert( pRowSet!=0 && (pRowSet->rsFlags & ROWSET_NEXT)==0 ); | 442 assert( pRowSet!=0 && (pRowSet->rsFlags & ROWSET_NEXT)==0 ); |
455 | 443 |
456 /* Sort entries into the forest on the first test of a new batch | 444 /* Sort entries into the forest on the first test of a new batch. |
| 445 ** To save unnecessary work, only do this when the batch number changes. |
457 */ | 446 */ |
458 if( iBatch!=pRowSet->iBatch ){ | 447 if( iBatch!=pRowSet->iBatch ){ /*OPTIMIZATION-IF-FALSE*/ |
459 p = pRowSet->pEntry; | 448 p = pRowSet->pEntry; |
460 if( p ){ | 449 if( p ){ |
461 struct RowSetEntry **ppPrevTree = &pRowSet->pForest; | 450 struct RowSetEntry **ppPrevTree = &pRowSet->pForest; |
462 if( (pRowSet->rsFlags & ROWSET_SORTED)==0 ){ | 451 if( (pRowSet->rsFlags & ROWSET_SORTED)==0 ){ /*OPTIMIZATION-IF-FALSE*/ |
| 452 /* Only sort the current set of entries if they need it */ |
463 p = rowSetEntrySort(p); | 453 p = rowSetEntrySort(p); |
464 } | 454 } |
465 for(pTree = pRowSet->pForest; pTree; pTree=pTree->pRight){ | 455 for(pTree = pRowSet->pForest; pTree; pTree=pTree->pRight){ |
466 ppPrevTree = &pTree->pRight; | 456 ppPrevTree = &pTree->pRight; |
467 if( pTree->pLeft==0 ){ | 457 if( pTree->pLeft==0 ){ |
468 pTree->pLeft = rowSetListToTree(p); | 458 pTree->pLeft = rowSetListToTree(p); |
469 break; | 459 break; |
470 }else{ | 460 }else{ |
471 struct RowSetEntry *pAux, *pTail; | 461 struct RowSetEntry *pAux, *pTail; |
472 rowSetTreeToList(pTree->pLeft, &pAux, &pTail); | 462 rowSetTreeToList(pTree->pLeft, &pAux, &pTail); |
(...skipping 26 matching lines...) Expand all Loading... |
499 p = p->pRight; | 489 p = p->pRight; |
500 }else if( p->v>iRowid ){ | 490 }else if( p->v>iRowid ){ |
501 p = p->pLeft; | 491 p = p->pLeft; |
502 }else{ | 492 }else{ |
503 return 1; | 493 return 1; |
504 } | 494 } |
505 } | 495 } |
506 } | 496 } |
507 return 0; | 497 return 0; |
508 } | 498 } |
OLD | NEW |