Chromium Code Reviews

Unified Diff: third_party/sqlite/amalgamation/sqlite3.03.c

Issue 2755803002: NCI: trybot test for sqlite 3.17 import. (Closed)
Patch Set: also clang on Linux i386 (created 3 years, 9 months ago)
Index: third_party/sqlite/amalgamation/sqlite3.03.c
diff --git a/third_party/sqlite/amalgamation/sqlite3.03.c b/third_party/sqlite/amalgamation/sqlite3.03.c
new file mode 100644
index 0000000000000000000000000000000000000000..c3b33348fca73363dad1b1860bd64c5bff20582a
--- /dev/null
+++ b/third_party/sqlite/amalgamation/sqlite3.03.c
@@ -0,0 +1,19043 @@
+/************** Begin file btree.c *******************************************/
+/*
+** 2004 April 6
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file implements an external (disk-based) database using BTrees.
+** See the header comment on "btreeInt.h" for additional information,
+** including a description of the file format and an overview of operation.
+*/
+/* #include "btreeInt.h" */
+
+/*
+** The header string that appears at the beginning of every
+** SQLite database.
+*/
+static const char zMagicHeader[] = SQLITE_FILE_HEADER;
+
+/*
+** Set this global variable to 1 to enable tracing using the TRACE
+** macro.
+*/
+#if 0
+int sqlite3BtreeTrace=1; /* True to enable tracing */
+# define TRACE(X) if(sqlite3BtreeTrace){printf X;fflush(stdout);}
+#else
+# define TRACE(X)
+#endif
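
/* Illustrative note (not part of this patch): because TRACE expands to
** "printf X", call sites pass a fully parenthesized printf()-style argument
** list, e.g.:
**
**     TRACE(("PTRMAP_UPDATE: %d->(%d,%d)\n", key, eType, parent));
**
** With the "#if 0" above changed to "#if 1" this prints and flushes stdout;
** otherwise the whole call compiles away. */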
+
+/*
+** Extract a 2-byte big-endian integer from an array of unsigned bytes.
+** But if the value is zero, make it 65536.
+**
+** This routine is used to extract the "offset to cell content area" value
+** from the header of a btree page. If the page size is 65536 and the page
+** is empty, the offset should be 65536, but the 2-byte value stores zero.
+** This routine makes the necessary adjustment to 65536.
+*/
+#define get2byteNotZero(X) (((((int)get2byte(X))-1)&0xffff)+1)
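
/* Illustrative sketch (not part of this patch): the same arithmetic applied
** to a plain int, showing that 0 maps to 65536 while 1..65535 pass through
** unchanged. notZeroDemo is a hypothetical name used only here. */
static int notZeroDemo(int v){        /* v plays the role of get2byte(X) */
  return ((v-1)&0xffff)+1;
}
/* notZeroDemo(0)==65536, notZeroDemo(1)==1, notZeroDemo(65535)==65535 */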
+
+/*
+** Values passed as the 5th argument to allocateBtreePage()
+*/
+#define BTALLOC_ANY 0 /* Allocate any page */
+#define BTALLOC_EXACT 1 /* Allocate exact page if possible */
+#define BTALLOC_LE 2 /* Allocate any page <= the parameter */
+
+/*
+** Macro IfNotOmitAV(x) returns (x) if SQLITE_OMIT_AUTOVACUUM is not
+** defined, or 0 if it is. For example:
+**
+** bIncrVacuum = IfNotOmitAV(pBtShared->incrVacuum);
+*/
+#ifndef SQLITE_OMIT_AUTOVACUUM
+#define IfNotOmitAV(expr) (expr)
+#else
+#define IfNotOmitAV(expr) 0
+#endif
+
+#ifndef SQLITE_OMIT_SHARED_CACHE
+/*
+** A list of BtShared objects that are eligible for participation
+** in shared cache. This variable has file scope during normal builds,
+** but the test harness needs to access it so we make it global for
+** test builds.
+**
+** Access to this variable is protected by SQLITE_MUTEX_STATIC_MASTER.
+*/
+#ifdef SQLITE_TEST
+SQLITE_PRIVATE BtShared *SQLITE_WSD sqlite3SharedCacheList = 0;
+#else
+static BtShared *SQLITE_WSD sqlite3SharedCacheList = 0;
+#endif
+#endif /* SQLITE_OMIT_SHARED_CACHE */
+
+#ifndef SQLITE_OMIT_SHARED_CACHE
+/*
+** Enable or disable the shared pager and schema features.
+**
+** This routine has no effect on existing database connections.
+** The shared cache setting affects only future calls to
+** sqlite3_open(), sqlite3_open16(), or sqlite3_open_v2().
+*/
+SQLITE_API int sqlite3_enable_shared_cache(int enable){
+ sqlite3GlobalConfig.sharedCacheEnabled = enable;
+ return SQLITE_OK;
+}
+#endif
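
/* Illustrative sketch (not part of this patch): minimal use of the public
** sqlite3_enable_shared_cache() API defined above. Shared-cache mode must be
** enabled before the connections that are meant to share a cache are opened;
** "test.db" and the function name are placeholders. */
#include "sqlite3.h"

static int openTwoSharingConnections(sqlite3 **ppDb1, sqlite3 **ppDb2){
  int rc = sqlite3_enable_shared_cache(1);  /* affects only future opens */
  if( rc!=SQLITE_OK ) return rc;
  rc = sqlite3_open("test.db", ppDb1);
  if( rc==SQLITE_OK ) rc = sqlite3_open("test.db", ppDb2);
  return rc;
}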
+
+
+
+#ifdef SQLITE_OMIT_SHARED_CACHE
+ /*
+ ** The functions querySharedCacheTableLock(), setSharedCacheTableLock(),
+ ** and clearAllSharedCacheTableLocks()
+ ** manipulate entries in the BtShared.pLock linked list used to store
+ ** shared-cache table level locks. If the library is compiled with the
+ ** shared-cache feature disabled, then there is only ever one user
+ ** of each BtShared structure and so this locking is not necessary.
+ ** So define the lock related functions as no-ops.
+ */
+ #define querySharedCacheTableLock(a,b,c) SQLITE_OK
+ #define setSharedCacheTableLock(a,b,c) SQLITE_OK
+ #define clearAllSharedCacheTableLocks(a)
+ #define downgradeAllSharedCacheTableLocks(a)
+ #define hasSharedCacheTableLock(a,b,c,d) 1
+ #define hasReadConflicts(a, b) 0
+#endif
+
+#ifndef SQLITE_OMIT_SHARED_CACHE
+
+#ifdef SQLITE_DEBUG
+/*
+**** This function is only used as part of an assert() statement. ***
+**
+** Check to see if pBtree holds the required locks to read or write to the
+** table with root page iRoot. Return 1 if it does and 0 if not.
+**
+** For example, when writing to a table with root-page iRoot via
+** Btree connection pBtree:
+**
+** assert( hasSharedCacheTableLock(pBtree, iRoot, 0, WRITE_LOCK) );
+**
+** When writing to an index that resides in a sharable database, the
+** caller should have first obtained a lock specifying the root page of
+** the corresponding table. This makes things a bit more complicated,
+** as this module treats each table as a separate structure. To determine
+** the table corresponding to the index being written, this
+** function has to search through the database schema.
+**
+** Instead of a lock on the table/index rooted at page iRoot, the caller may
+** hold a write-lock on the schema table (root page 1). This is also
+** acceptable.
+*/
+static int hasSharedCacheTableLock(
+ Btree *pBtree, /* Handle that must hold lock */
+ Pgno iRoot, /* Root page of b-tree */
+ int isIndex, /* True if iRoot is the root of an index b-tree */
+ int eLockType /* Required lock type (READ_LOCK or WRITE_LOCK) */
+){
+ Schema *pSchema = (Schema *)pBtree->pBt->pSchema;
+ Pgno iTab = 0;
+ BtLock *pLock;
+
+ /* If this database is not shareable, or if the client is reading
+ ** and has the read-uncommitted flag set, then no lock is required.
+ ** Return true immediately.
+ */
+ if( (pBtree->sharable==0)
+ || (eLockType==READ_LOCK && (pBtree->db->flags & SQLITE_ReadUncommitted))
+ ){
+ return 1;
+ }
+
+ /* If the client is reading or writing an index and the schema is
+ ** not loaded, then it is too difficult to actually check to see if
+ ** the correct locks are held. So do not bother - just return true.
+ ** This case does not come up very often anyhow.
+ */
+ if( isIndex && (!pSchema || (pSchema->schemaFlags&DB_SchemaLoaded)==0) ){
+ return 1;
+ }
+
+ /* Figure out the root-page that the lock should be held on. For table
+ ** b-trees, this is just the root page of the b-tree being read or
+ ** written. For index b-trees, it is the root page of the associated
+ ** table. */
+ if( isIndex ){
+ HashElem *p;
+ for(p=sqliteHashFirst(&pSchema->idxHash); p; p=sqliteHashNext(p)){
+ Index *pIdx = (Index *)sqliteHashData(p);
+ if( pIdx->tnum==(int)iRoot ){
+ if( iTab ){
+ /* Two or more indexes share the same root page. There must
+ ** be imposter tables. So just return true. The assert is not
+ ** useful in that case. */
+ return 1;
+ }
+ iTab = pIdx->pTable->tnum;
+ }
+ }
+ }else{
+ iTab = iRoot;
+ }
+
+ /* Search for the required lock. Either a write-lock on root-page iTab, a
+ ** write-lock on the schema table, or (if the client is reading) a
+ ** read-lock on iTab will suffice. Return 1 if any of these are found. */
+ for(pLock=pBtree->pBt->pLock; pLock; pLock=pLock->pNext){
+ if( pLock->pBtree==pBtree
+ && (pLock->iTable==iTab || (pLock->eLock==WRITE_LOCK && pLock->iTable==1))
+ && pLock->eLock>=eLockType
+ ){
+ return 1;
+ }
+ }
+
+ /* Failed to find the required lock. */
+ return 0;
+}
+#endif /* SQLITE_DEBUG */
+
+#ifdef SQLITE_DEBUG
+/*
+**** This function may be used as part of assert() statements only. ****
+**
+** Return true if it would be illegal for pBtree to write into the
+** table or index rooted at iRoot because other shared connections are
+** simultaneously reading that same table or index.
+**
+** It is illegal for pBtree to write if some other Btree object that
+** shares the same BtShared object is currently reading or writing
+** the iRoot table. Except, if the other Btree object has the
+** read-uncommitted flag set, then it is OK for the other object to
+** have a read cursor.
+**
+** For example, before writing to any part of the table or index
+** rooted at page iRoot, one should call:
+**
+** assert( !hasReadConflicts(pBtree, iRoot) );
+*/
+static int hasReadConflicts(Btree *pBtree, Pgno iRoot){
+ BtCursor *p;
+ for(p=pBtree->pBt->pCursor; p; p=p->pNext){
+ if( p->pgnoRoot==iRoot
+ && p->pBtree!=pBtree
+ && 0==(p->pBtree->db->flags & SQLITE_ReadUncommitted)
+ ){
+ return 1;
+ }
+ }
+ return 0;
+}
+#endif /* #ifdef SQLITE_DEBUG */
+
+/*
+** Query to see if Btree handle p may obtain a lock of type eLock
+** (READ_LOCK or WRITE_LOCK) on the table with root-page iTab. Return
+** SQLITE_OK if the lock may be obtained (by calling
+** setSharedCacheTableLock()), or SQLITE_LOCKED if not.
+*/
+static int querySharedCacheTableLock(Btree *p, Pgno iTab, u8 eLock){
+ BtShared *pBt = p->pBt;
+ BtLock *pIter;
+
+ assert( sqlite3BtreeHoldsMutex(p) );
+ assert( eLock==READ_LOCK || eLock==WRITE_LOCK );
+ assert( p->db!=0 );
+ assert( !(p->db->flags&SQLITE_ReadUncommitted)||eLock==WRITE_LOCK||iTab==1 );
+
+ /* If requesting a write-lock, then the Btree must have an open write
+ ** transaction on this file. And, obviously, for this to be so there
+ ** must be an open write transaction on the file itself.
+ */
+ assert( eLock==READ_LOCK || (p==pBt->pWriter && p->inTrans==TRANS_WRITE) );
+ assert( eLock==READ_LOCK || pBt->inTransaction==TRANS_WRITE );
+
+ /* This routine is a no-op if the shared-cache is not enabled */
+ if( !p->sharable ){
+ return SQLITE_OK;
+ }
+
+ /* If some other connection is holding an exclusive lock, the
+ ** requested lock may not be obtained.
+ */
+ if( pBt->pWriter!=p && (pBt->btsFlags & BTS_EXCLUSIVE)!=0 ){
+ sqlite3ConnectionBlocked(p->db, pBt->pWriter->db);
+ return SQLITE_LOCKED_SHAREDCACHE;
+ }
+
+ for(pIter=pBt->pLock; pIter; pIter=pIter->pNext){
+ /* The condition (pIter->eLock!=eLock) in the following if(...)
+ ** statement is a simplification of:
+ **
+ ** (eLock==WRITE_LOCK || pIter->eLock==WRITE_LOCK)
+ **
+ ** since we know that if eLock==WRITE_LOCK, then no other connection
+ ** may hold a WRITE_LOCK on any table in this file (since there can
+ ** only be a single writer).
+ */
+ assert( pIter->eLock==READ_LOCK || pIter->eLock==WRITE_LOCK );
+ assert( eLock==READ_LOCK || pIter->pBtree==p || pIter->eLock==READ_LOCK);
+ if( pIter->pBtree!=p && pIter->iTable==iTab && pIter->eLock!=eLock ){
+ sqlite3ConnectionBlocked(p->db, pIter->pBtree->db);
+ if( eLock==WRITE_LOCK ){
+ assert( p==pBt->pWriter );
+ pBt->btsFlags |= BTS_PENDING;
+ }
+ return SQLITE_LOCKED_SHAREDCACHE;
+ }
+ }
+ return SQLITE_OK;
+}
+#endif /* !SQLITE_OMIT_SHARED_CACHE */
+
+#ifndef SQLITE_OMIT_SHARED_CACHE
+/*
+** Add a lock on the table with root-page iTable to the shared-btree used
+** by Btree handle p. Parameter eLock must be either READ_LOCK or
+** WRITE_LOCK.
+**
+** This function assumes the following:
+**
+** (a) The specified Btree object p is connected to a sharable
+** database (one with the BtShared.sharable flag set), and
+**
+** (b) No other Btree objects hold a lock that conflicts
+** with the requested lock (i.e. querySharedCacheTableLock() has
+** already been called and returned SQLITE_OK).
+**
+** SQLITE_OK is returned if the lock is added successfully. SQLITE_NOMEM
+** is returned if a malloc attempt fails.
+*/
+static int setSharedCacheTableLock(Btree *p, Pgno iTable, u8 eLock){
+ BtShared *pBt = p->pBt;
+ BtLock *pLock = 0;
+ BtLock *pIter;
+
+ assert( sqlite3BtreeHoldsMutex(p) );
+ assert( eLock==READ_LOCK || eLock==WRITE_LOCK );
+ assert( p->db!=0 );
+
+ /* A connection with the read-uncommitted flag set will never try to
+ ** obtain a read-lock using this function. The only read-lock obtained
+ ** by a connection in read-uncommitted mode is on the sqlite_master
+ ** table, and that lock is obtained in BtreeBeginTrans(). */
+ assert( 0==(p->db->flags&SQLITE_ReadUncommitted) || eLock==WRITE_LOCK );
+
+ /* This function should only be called on a sharable b-tree after it
+ ** has been determined that no other b-tree holds a conflicting lock. */
+ assert( p->sharable );
+ assert( SQLITE_OK==querySharedCacheTableLock(p, iTable, eLock) );
+
+ /* First search the list for an existing lock on this table. */
+ for(pIter=pBt->pLock; pIter; pIter=pIter->pNext){
+ if( pIter->iTable==iTable && pIter->pBtree==p ){
+ pLock = pIter;
+ break;
+ }
+ }
+
+ /* If the above search did not find a BtLock struct associating Btree p
+ ** with table iTable, allocate one and link it into the list.
+ */
+ if( !pLock ){
+ pLock = (BtLock *)sqlite3MallocZero(sizeof(BtLock));
+ if( !pLock ){
+ return SQLITE_NOMEM_BKPT;
+ }
+ pLock->iTable = iTable;
+ pLock->pBtree = p;
+ pLock->pNext = pBt->pLock;
+ pBt->pLock = pLock;
+ }
+
+ /* Set the BtLock.eLock variable to the maximum of the current lock
+ ** and the requested lock. This means if a write-lock was already held
+ ** and a read-lock requested, we don't incorrectly downgrade the lock.
+ */
+ assert( WRITE_LOCK>READ_LOCK );
+ if( eLock>pLock->eLock ){
+ pLock->eLock = eLock;
+ }
+
+ return SQLITE_OK;
+}
+#endif /* !SQLITE_OMIT_SHARED_CACHE */
+
+#ifndef SQLITE_OMIT_SHARED_CACHE
+/*
+** Release all the table locks (locks obtained via calls to
+** the setSharedCacheTableLock() procedure) held by Btree object p.
+**
+** This function assumes that Btree p has an open read or write
+** transaction. If it does not, then the BTS_PENDING flag
+** may be incorrectly cleared.
+*/
+static void clearAllSharedCacheTableLocks(Btree *p){
+ BtShared *pBt = p->pBt;
+ BtLock **ppIter = &pBt->pLock;
+
+ assert( sqlite3BtreeHoldsMutex(p) );
+ assert( p->sharable || 0==*ppIter );
+ assert( p->inTrans>0 );
+
+ while( *ppIter ){
+ BtLock *pLock = *ppIter;
+ assert( (pBt->btsFlags & BTS_EXCLUSIVE)==0 || pBt->pWriter==pLock->pBtree );
+ assert( pLock->pBtree->inTrans>=pLock->eLock );
+ if( pLock->pBtree==p ){
+ *ppIter = pLock->pNext;
+ assert( pLock->iTable!=1 || pLock==&p->lock );
+ if( pLock->iTable!=1 ){
+ sqlite3_free(pLock);
+ }
+ }else{
+ ppIter = &pLock->pNext;
+ }
+ }
+
+ assert( (pBt->btsFlags & BTS_PENDING)==0 || pBt->pWriter );
+ if( pBt->pWriter==p ){
+ pBt->pWriter = 0;
+ pBt->btsFlags &= ~(BTS_EXCLUSIVE|BTS_PENDING);
+ }else if( pBt->nTransaction==2 ){
+ /* This function is called when Btree p is concluding its
+ ** transaction. If there currently exists a writer, and p is not
+ ** that writer, then the number of locks held by connections other
+ ** than the writer must be about to drop to zero. In this case
+ ** set the BTS_PENDING flag to 0.
+ **
+ ** If there is not currently a writer, then BTS_PENDING must
+ ** be zero already. So this next line is harmless in that case.
+ */
+ pBt->btsFlags &= ~BTS_PENDING;
+ }
+}
+
+/*
+** This function changes all write-locks held by Btree p into read-locks.
+*/
+static void downgradeAllSharedCacheTableLocks(Btree *p){
+ BtShared *pBt = p->pBt;
+ if( pBt->pWriter==p ){
+ BtLock *pLock;
+ pBt->pWriter = 0;
+ pBt->btsFlags &= ~(BTS_EXCLUSIVE|BTS_PENDING);
+ for(pLock=pBt->pLock; pLock; pLock=pLock->pNext){
+ assert( pLock->eLock==READ_LOCK || pLock->pBtree==p );
+ pLock->eLock = READ_LOCK;
+ }
+ }
+}
+
+#endif /* SQLITE_OMIT_SHARED_CACHE */
+
+static void releasePage(MemPage *pPage); /* Forward reference */
+
+/*
+***** This routine is used inside of assert() only ****
+**
+** Verify that the cursor holds the mutex on its BtShared
+*/
+#ifdef SQLITE_DEBUG
+static int cursorHoldsMutex(BtCursor *p){
+ return sqlite3_mutex_held(p->pBt->mutex);
+}
+
+/* Verify that the cursor and the BtShared agree about what is the current
+** database connection. This is important in shared-cache mode. If the database
+** connection pointers get out-of-sync, it is possible for routines like
+** btreeInitPage() to reference a stale connection pointer that references a
+** connection that has already been closed. This routine is used inside assert()
+** statements only and for the purpose of double-checking that the btree code
+** does keep the database connection pointers up-to-date.
+*/
+static int cursorOwnsBtShared(BtCursor *p){
+ assert( cursorHoldsMutex(p) );
+ return (p->pBtree->db==p->pBt->db);
+}
+#endif
+
+/*
+** Invalidate the overflow cache of the cursor passed as the first argument.
+*/
+#define invalidateOverflowCache(pCur) (pCur->curFlags &= ~BTCF_ValidOvfl)
+
+/*
+** Invalidate the overflow page-list cache for all cursors opened
+** on the shared btree structure pBt.
+*/
+static void invalidateAllOverflowCache(BtShared *pBt){
+ BtCursor *p;
+ assert( sqlite3_mutex_held(pBt->mutex) );
+ for(p=pBt->pCursor; p; p=p->pNext){
+ invalidateOverflowCache(p);
+ }
+}
+
+#ifndef SQLITE_OMIT_INCRBLOB
+/*
+** This function is called before modifying the contents of a table
+** to invalidate any incrblob cursors that are open on the
+** row or one of the rows being modified.
+**
+** If argument isClearTable is true, then the entire contents of the
+** table is about to be deleted. In this case invalidate all incrblob
+** cursors open on any row within the table with root-page pgnoRoot.
+**
+** Otherwise, if argument isClearTable is false, then the row with
+** rowid iRow is being replaced or deleted. In this case invalidate
+** only those incrblob cursors open on that specific row.
+*/
+static void invalidateIncrblobCursors(
+ Btree *pBtree, /* The database file to check */
+ i64 iRow, /* The rowid that might be changing */
+ int isClearTable /* True if all rows are being deleted */
+){
+ BtCursor *p;
+ if( pBtree->hasIncrblobCur==0 ) return;
+ assert( sqlite3BtreeHoldsMutex(pBtree) );
+ pBtree->hasIncrblobCur = 0;
+ for(p=pBtree->pBt->pCursor; p; p=p->pNext){
+ if( (p->curFlags & BTCF_Incrblob)!=0 ){
+ pBtree->hasIncrblobCur = 1;
+ if( isClearTable || p->info.nKey==iRow ){
+ p->eState = CURSOR_INVALID;
+ }
+ }
+ }
+}
+
+#else
+ /* Stub function when INCRBLOB is omitted */
+ #define invalidateIncrblobCursors(x,y,z)
+#endif /* SQLITE_OMIT_INCRBLOB */
+
+/*
+** Set bit pgno of the BtShared.pHasContent bitvec. This is called
+** when a page that previously contained data becomes a free-list leaf
+** page.
+**
+** The BtShared.pHasContent bitvec exists to work around an obscure
+** bug caused by the interaction of two useful IO optimizations surrounding
+** free-list leaf pages:
+**
+** 1) When all data is deleted from a page and the page becomes
+** a free-list leaf page, the page is not written to the database
+** (as free-list leaf pages contain no meaningful data). Sometimes
+** such a page is not even journalled (as it will not be modified,
+** why bother journalling it?).
+**
+** 2) When a free-list leaf page is reused, its content is not read
+** from the database or written to the journal file (why should it
+** be, if it is not at all meaningful?).
+**
+** By themselves, these optimizations work fine and provide a handy
+** performance boost to bulk delete or insert operations. However, if
+** a page is moved to the free-list and then reused within the same
+** transaction, a problem comes up. If the page is not journalled when
+** it is moved to the free-list and it is also not journalled when it
+** is extracted from the free-list and reused, then the original data
+** may be lost. In the event of a rollback, it may not be possible
+** to restore the database to its original configuration.
+**
+** The solution is the BtShared.pHasContent bitvec. Whenever a page is
+** moved to become a free-list leaf page, the corresponding bit is
+** set in the bitvec. Whenever a leaf page is extracted from the free-list,
+** optimization 2 above is omitted if the corresponding bit is already
+** set in BtShared.pHasContent. The contents of the bitvec are cleared
+** at the end of every transaction.
+*/
+static int btreeSetHasContent(BtShared *pBt, Pgno pgno){
+ int rc = SQLITE_OK;
+ if( !pBt->pHasContent ){
+ assert( pgno<=pBt->nPage );
+ pBt->pHasContent = sqlite3BitvecCreate(pBt->nPage);
+ if( !pBt->pHasContent ){
+ rc = SQLITE_NOMEM_BKPT;
+ }
+ }
+ if( rc==SQLITE_OK && pgno<=sqlite3BitvecSize(pBt->pHasContent) ){
+ rc = sqlite3BitvecSet(pBt->pHasContent, pgno);
+ }
+ return rc;
+}
+
+/*
+** Query the BtShared.pHasContent vector.
+**
+** This function is called when a free-list leaf page is removed from the
+** free-list for reuse. It returns false if it is safe to retrieve the
+** page from the pager layer with the 'no-content' flag set. True otherwise.
+*/
+static int btreeGetHasContent(BtShared *pBt, Pgno pgno){
+ Bitvec *p = pBt->pHasContent;
+ return (p && (pgno>sqlite3BitvecSize(p) || sqlite3BitvecTest(p, pgno)));
+}
+
+/*
+** Clear (destroy) the BtShared.pHasContent bitvec. This should be
+** invoked at the conclusion of each write-transaction.
+*/
+static void btreeClearHasContent(BtShared *pBt){
+ sqlite3BitvecDestroy(pBt->pHasContent);
+ pBt->pHasContent = 0;
+}
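
/* Illustrative sketch (not part of this patch): the pHasContent protocol
** described above, modeled with a plain byte array instead of a Bitvec. A
** page that becomes a free-list leaf inside the current transaction gets its
** flag set; the "no-content" read optimization is allowed on reuse only when
** the flag is still clear. All names here are hypothetical. */
#define DEMO_NPAGE 64
static unsigned char demoHasContent[DEMO_NPAGE+1];  /* 1-based page numbers */

static void demoPageBecomesFreeListLeaf(unsigned pgno){  /* cf. btreeSetHasContent() */
  if( pgno>=1 && pgno<=DEMO_NPAGE ) demoHasContent[pgno] = 1;
}
static int demoNoContentReadAllowed(unsigned pgno){      /* cf. !btreeGetHasContent() */
  return pgno>=1 && pgno<=DEMO_NPAGE && demoHasContent[pgno]==0;
}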
+
+/*
+** Release all of the apPage[] pages for a cursor.
+*/
+static void btreeReleaseAllCursorPages(BtCursor *pCur){
+ int i;
+ for(i=0; i<=pCur->iPage; i++){
+ releasePage(pCur->apPage[i]);
+ pCur->apPage[i] = 0;
+ }
+ pCur->iPage = -1;
+}
+
+/*
+** The cursor passed as the only argument must point to a valid entry
+** when this function is called (i.e. have eState==CURSOR_VALID). This
+** function saves the current cursor key in variables pCur->nKey and
+** pCur->pKey. SQLITE_OK is returned if successful or an SQLite error
+** code otherwise.
+**
+** If the cursor is open on an intkey table, then the integer key
+** (the rowid) is stored in pCur->nKey and pCur->pKey is left set to
+** NULL. If the cursor is open on a non-intkey table, then pCur->pKey is
+** set to point to a malloced buffer pCur->nKey bytes in size containing
+** the key.
+*/
+static int saveCursorKey(BtCursor *pCur){
+ int rc = SQLITE_OK;
+ assert( CURSOR_VALID==pCur->eState );
+ assert( 0==pCur->pKey );
+ assert( cursorHoldsMutex(pCur) );
+
+ if( pCur->curIntKey ){
+ /* Only the rowid is required for a table btree */
+ pCur->nKey = sqlite3BtreeIntegerKey(pCur);
+ }else{
+ /* For an index btree, save the complete key content */
+ void *pKey;
+ pCur->nKey = sqlite3BtreePayloadSize(pCur);
+ pKey = sqlite3Malloc( pCur->nKey );
+ if( pKey ){
+ rc = sqlite3BtreePayload(pCur, 0, (int)pCur->nKey, pKey);
+ if( rc==SQLITE_OK ){
+ pCur->pKey = pKey;
+ }else{
+ sqlite3_free(pKey);
+ }
+ }else{
+ rc = SQLITE_NOMEM_BKPT;
+ }
+ }
+ assert( !pCur->curIntKey || !pCur->pKey );
+ return rc;
+}
+
+/*
+** Save the current cursor position in the variables BtCursor.nKey
+** and BtCursor.pKey. The cursor's state is set to CURSOR_REQUIRESEEK.
+**
+** The caller must ensure that the cursor is valid (has eState==CURSOR_VALID)
+** prior to calling this routine.
+*/
+static int saveCursorPosition(BtCursor *pCur){
+ int rc;
+
+ assert( CURSOR_VALID==pCur->eState || CURSOR_SKIPNEXT==pCur->eState );
+ assert( 0==pCur->pKey );
+ assert( cursorHoldsMutex(pCur) );
+
+ if( pCur->eState==CURSOR_SKIPNEXT ){
+ pCur->eState = CURSOR_VALID;
+ }else{
+ pCur->skipNext = 0;
+ }
+
+ rc = saveCursorKey(pCur);
+ if( rc==SQLITE_OK ){
+ btreeReleaseAllCursorPages(pCur);
+ pCur->eState = CURSOR_REQUIRESEEK;
+ }
+
+ pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl|BTCF_AtLast);
+ return rc;
+}
+
+/* Forward reference */
+static int SQLITE_NOINLINE saveCursorsOnList(BtCursor*,Pgno,BtCursor*);
+
+/*
+** Save the positions of all cursors (except pExcept) that are open on
+** the table with root-page iRoot. "Saving the cursor position" means that
+** the location in the btree is remembered in such a way that it can be
+** moved back to the same spot after the btree has been modified. This
+** routine is called just before cursor pExcept is used to modify the
+** table, for example in BtreeDelete() or BtreeInsert().
+**
+** If there are two or more cursors on the same btree, then all such
+** cursors should have their BTCF_Multiple flag set. The btreeCursor()
+** routine enforces that rule. This routine only needs to be called in
+** the uncommon case when pExcept has the BTCF_Multiple flag set.
+**
+** If pExcept!=NULL and if no other cursors are found on the same root-page,
+** then the BTCF_Multiple flag on pExcept is cleared, to avoid another
+** pointless call to this routine.
+**
+** Implementation note: This routine merely checks to see if any cursors
+** need to be saved. It calls out to saveCursorsOnList() in the (unusual)
+** event that cursors are in need of being saved.
+*/
+static int saveAllCursors(BtShared *pBt, Pgno iRoot, BtCursor *pExcept){
+ BtCursor *p;
+ assert( sqlite3_mutex_held(pBt->mutex) );
+ assert( pExcept==0 || pExcept->pBt==pBt );
+ for(p=pBt->pCursor; p; p=p->pNext){
+ if( p!=pExcept && (0==iRoot || p->pgnoRoot==iRoot) ) break;
+ }
+ if( p ) return saveCursorsOnList(p, iRoot, pExcept);
+ if( pExcept ) pExcept->curFlags &= ~BTCF_Multiple;
+ return SQLITE_OK;
+}
+
+/* This helper routine to saveAllCursors does the actual work of saving
+** the cursors if and when a cursor is found that actually requires saving.
+** The common case is that no cursors need to be saved, so this routine is
+** broken out from its caller to avoid unnecessary stack pointer movement.
+*/
+static int SQLITE_NOINLINE saveCursorsOnList(
+ BtCursor *p, /* The first cursor that needs saving */
+ Pgno iRoot, /* Only save cursor with this iRoot. Save all if zero */
+ BtCursor *pExcept /* Do not save this cursor */
+){
+ do{
+ if( p!=pExcept && (0==iRoot || p->pgnoRoot==iRoot) ){
+ if( p->eState==CURSOR_VALID || p->eState==CURSOR_SKIPNEXT ){
+ int rc = saveCursorPosition(p);
+ if( SQLITE_OK!=rc ){
+ return rc;
+ }
+ }else{
+ testcase( p->iPage>0 );
+ btreeReleaseAllCursorPages(p);
+ }
+ }
+ p = p->pNext;
+ }while( p );
+ return SQLITE_OK;
+}
+
+/*
+** Clear the current cursor position.
+*/
+SQLITE_PRIVATE void sqlite3BtreeClearCursor(BtCursor *pCur){
+ assert( cursorHoldsMutex(pCur) );
+ sqlite3_free(pCur->pKey);
+ pCur->pKey = 0;
+ pCur->eState = CURSOR_INVALID;
+}
+
+/*
+** In this version of BtreeMoveto, pKey is a packed index record
+** such as is generated by the OP_MakeRecord opcode. Unpack the
+** record and then call BtreeMovetoUnpacked() to do the work.
+*/
+static int btreeMoveto(
+ BtCursor *pCur, /* Cursor open on the btree to be searched */
+ const void *pKey, /* Packed key if the btree is an index */
+ i64 nKey, /* Integer key for tables. Size of pKey for indices */
+ int bias, /* Bias search to the high end */
+ int *pRes /* Write search results here */
+){
+ int rc; /* Status code */
+ UnpackedRecord *pIdxKey; /* Unpacked index key */
+
+ if( pKey ){
+ assert( nKey==(i64)(int)nKey );
+ pIdxKey = sqlite3VdbeAllocUnpackedRecord(pCur->pKeyInfo);
+ if( pIdxKey==0 ) return SQLITE_NOMEM_BKPT;
+ sqlite3VdbeRecordUnpack(pCur->pKeyInfo, (int)nKey, pKey, pIdxKey);
+ if( pIdxKey->nField==0 ){
+ rc = SQLITE_CORRUPT_BKPT;
+ goto moveto_done;
+ }
+ }else{
+ pIdxKey = 0;
+ }
+ rc = sqlite3BtreeMovetoUnpacked(pCur, pIdxKey, nKey, bias, pRes);
+moveto_done:
+ if( pIdxKey ){
+ sqlite3DbFree(pCur->pKeyInfo->db, pIdxKey);
+ }
+ return rc;
+}
+
+/*
+** Restore the cursor to the position it was in (or as close to as possible)
+** when saveCursorPosition() was called. Note that this call deletes the
+** saved position info stored by saveCursorPosition(), so there can be
+** at most one effective restoreCursorPosition() call after each
+** saveCursorPosition().
+*/
+static int btreeRestoreCursorPosition(BtCursor *pCur){
+ int rc;
+ int skipNext;
+ assert( cursorOwnsBtShared(pCur) );
+ assert( pCur->eState>=CURSOR_REQUIRESEEK );
+ if( pCur->eState==CURSOR_FAULT ){
+ return pCur->skipNext;
+ }
+ pCur->eState = CURSOR_INVALID;
+ rc = btreeMoveto(pCur, pCur->pKey, pCur->nKey, 0, &skipNext);
+ if( rc==SQLITE_OK ){
+ sqlite3_free(pCur->pKey);
+ pCur->pKey = 0;
+ assert( pCur->eState==CURSOR_VALID || pCur->eState==CURSOR_INVALID );
+ pCur->skipNext |= skipNext;
+ if( pCur->skipNext && pCur->eState==CURSOR_VALID ){
+ pCur->eState = CURSOR_SKIPNEXT;
+ }
+ }
+ return rc;
+}
+
+#define restoreCursorPosition(p) \
+ (p->eState>=CURSOR_REQUIRESEEK ? \
+ btreeRestoreCursorPosition(p) : \
+ SQLITE_OK)
+
+/*
+** Determine whether or not a cursor has moved from the position where
+** it was last placed, or has been invalidated for any other reason.
+** Cursors can move when the row they are pointing at is deleted out
+** from under them, for example. A cursor might also move if a btree
+** is rebalanced.
+**
+** Calling this routine with a NULL cursor pointer returns false.
+**
+** Use the separate sqlite3BtreeCursorRestore() routine to restore a cursor
+** back to where it ought to be if this routine returns true.
+*/
+SQLITE_PRIVATE int sqlite3BtreeCursorHasMoved(BtCursor *pCur){
+ return pCur->eState!=CURSOR_VALID;
+}
+
+/*
+** This routine restores a cursor back to its original position after it
+** has been moved by some outside activity (such as a btree rebalance or
+** a row having been deleted out from under the cursor).
+**
+** On success, the *pDifferentRow parameter is false if the cursor is left
+** pointing at exactly the same row. *pDifferentRow is true if the row the
+** cursor was pointing to has been deleted, forcing the cursor to point to some
+** nearby row.
+**
+** This routine should only be called for a cursor that just returned
+** TRUE from sqlite3BtreeCursorHasMoved().
+*/
+SQLITE_PRIVATE int sqlite3BtreeCursorRestore(BtCursor *pCur, int *pDifferentRow){
+ int rc;
+
+ assert( pCur!=0 );
+ assert( pCur->eState!=CURSOR_VALID );
+ rc = restoreCursorPosition(pCur);
+ if( rc ){
+ *pDifferentRow = 1;
+ return rc;
+ }
+ if( pCur->eState!=CURSOR_VALID ){
+ *pDifferentRow = 1;
+ }else{
+ assert( pCur->skipNext==0 );
+ *pDifferentRow = 0;
+ }
+ return SQLITE_OK;
+}
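
/* Illustrative sketch (not part of this patch): the calling pattern that the
** two routines above describe. pCur is assumed to be an open BtCursor; the
** helper name is hypothetical. */
static int demoRefreshCursor(BtCursor *pCur){
  int bDifferentRow;
  if( sqlite3BtreeCursorHasMoved(pCur) ){
    int rc = sqlite3BtreeCursorRestore(pCur, &bDifferentRow);
    if( rc!=SQLITE_OK ) return rc;
    if( bDifferentRow ){
      /* The saved row no longer exists; pCur now points at a nearby row
      ** (or at nothing) and the caller must re-validate its position. */
    }
  }
  return SQLITE_OK;
}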
+
+#ifdef SQLITE_ENABLE_CURSOR_HINTS
+/*
+** Provide hints to the cursor. The particular hint given (and the type
+** and number of the varargs parameters) is determined by the eHintType
+** parameter. See the definitions of the BTREE_HINT_* macros for details.
+*/
+SQLITE_PRIVATE void sqlite3BtreeCursorHint(BtCursor *pCur, int eHintType, ...){
+ /* Used only by systems that substitute their own storage engine */
+}
+#endif
+
+/*
+** Provide flag hints to the cursor.
+*/
+SQLITE_PRIVATE void sqlite3BtreeCursorHintFlags(BtCursor *pCur, unsigned x){
+ assert( x==BTREE_SEEK_EQ || x==BTREE_BULKLOAD || x==0 );
+ pCur->hints = x;
+}
+
+
+#ifndef SQLITE_OMIT_AUTOVACUUM
+/*
+** Given a page number of a regular database page, return the page
+** number for the pointer-map page that contains the entry for the
+** input page number.
+**
+** Return 0 (not a valid page) for pgno==1 since there is
+** no pointer map associated with page 1. The integrity_check logic
+** requires that ptrmapPageno(*,1)!=1.
+*/
+static Pgno ptrmapPageno(BtShared *pBt, Pgno pgno){
+ int nPagesPerMapPage;
+ Pgno iPtrMap, ret;
+ assert( sqlite3_mutex_held(pBt->mutex) );
+ if( pgno<2 ) return 0;
+ nPagesPerMapPage = (pBt->usableSize/5)+1;
+ iPtrMap = (pgno-2)/nPagesPerMapPage;
+ ret = (iPtrMap*nPagesPerMapPage) + 2;
+ if( ret==PENDING_BYTE_PAGE(pBt) ){
+ ret++;
+ }
+ return ret;
+}
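
/* Illustrative sketch (not part of this patch): the same computation written
** against a bare usable-size value, with a worked example. With
** usableSize==1024 each pointer-map page holds 1024/5==204 entries, so
** nPagesPerMapPage==205: page 2 is the first pointer-map page and covers
** pages 3..206, page 207 is the next one and covers pages 208..411, and so
** on. (The PENDING_BYTE adjustment made above is omitted here.) */
static unsigned ptrmapPagenoDemo(unsigned usableSize, unsigned pgno){
  unsigned nPagesPerMapPage = (usableSize/5)+1;
  if( pgno<2 ) return 0;
  return ((pgno-2)/nPagesPerMapPage)*nPagesPerMapPage + 2;
}
/* ptrmapPagenoDemo(1024,3)==2, ptrmapPagenoDemo(1024,206)==2,
** ptrmapPagenoDemo(1024,207)==207 */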
+
+/*
+** Write an entry into the pointer map.
+**
+** This routine updates the pointer map entry for page number 'key'
+** so that it maps to type 'eType' and parent page number 'pgno'.
+**
+** If *pRC is initially non-zero (non-SQLITE_OK) then this routine is
+** a no-op. If an error occurs, the appropriate error code is written
+** into *pRC.
+*/
+static void ptrmapPut(BtShared *pBt, Pgno key, u8 eType, Pgno parent, int *pRC){
+ DbPage *pDbPage; /* The pointer map page */
+ u8 *pPtrmap; /* The pointer map data */
+ Pgno iPtrmap; /* The pointer map page number */
+ int offset; /* Offset in pointer map page */
+ int rc; /* Return code from subfunctions */
+
+ if( *pRC ) return;
+
+ assert( sqlite3_mutex_held(pBt->mutex) );
+ /* The master-journal page number must never be used as a pointer map page */
+ assert( 0==PTRMAP_ISPAGE(pBt, PENDING_BYTE_PAGE(pBt)) );
+
+ assert( pBt->autoVacuum );
+ if( key==0 ){
+ *pRC = SQLITE_CORRUPT_BKPT;
+ return;
+ }
+ iPtrmap = PTRMAP_PAGENO(pBt, key);
+ rc = sqlite3PagerGet(pBt->pPager, iPtrmap, &pDbPage, 0);
+ if( rc!=SQLITE_OK ){
+ *pRC = rc;
+ return;
+ }
+ offset = PTRMAP_PTROFFSET(iPtrmap, key);
+ if( offset<0 ){
+ *pRC = SQLITE_CORRUPT_BKPT;
+ goto ptrmap_exit;
+ }
+ assert( offset <= (int)pBt->usableSize-5 );
+ pPtrmap = (u8 *)sqlite3PagerGetData(pDbPage);
+
+ if( eType!=pPtrmap[offset] || get4byte(&pPtrmap[offset+1])!=parent ){
+ TRACE(("PTRMAP_UPDATE: %d->(%d,%d)\n", key, eType, parent));
+ *pRC= rc = sqlite3PagerWrite(pDbPage);
+ if( rc==SQLITE_OK ){
+ pPtrmap[offset] = eType;
+ put4byte(&pPtrmap[offset+1], parent);
+ }
+ }
+
+ptrmap_exit:
+ sqlite3PagerUnref(pDbPage);
+}
+
+/*
+** Read an entry from the pointer map.
+**
+** This routine retrieves the pointer map entry for page 'key', writing
+** the type and parent page number to *pEType and *pPgno respectively.
+** An error code is returned if something goes wrong, otherwise SQLITE_OK.
+*/
+static int ptrmapGet(BtShared *pBt, Pgno key, u8 *pEType, Pgno *pPgno){
+ DbPage *pDbPage; /* The pointer map page */
+ int iPtrmap; /* Pointer map page index */
+ u8 *pPtrmap; /* Pointer map page data */
+ int offset; /* Offset of entry in pointer map */
+ int rc;
+
+ assert( sqlite3_mutex_held(pBt->mutex) );
+
+ iPtrmap = PTRMAP_PAGENO(pBt, key);
+ rc = sqlite3PagerGet(pBt->pPager, iPtrmap, &pDbPage, 0);
+ if( rc!=0 ){
+ return rc;
+ }
+ pPtrmap = (u8 *)sqlite3PagerGetData(pDbPage);
+
+ offset = PTRMAP_PTROFFSET(iPtrmap, key);
+ if( offset<0 ){
+ sqlite3PagerUnref(pDbPage);
+ return SQLITE_CORRUPT_BKPT;
+ }
+ assert( offset <= (int)pBt->usableSize-5 );
+ assert( pEType!=0 );
+ *pEType = pPtrmap[offset];
+ if( pPgno ) *pPgno = get4byte(&pPtrmap[offset+1]);
+
+ sqlite3PagerUnref(pDbPage);
+ if( *pEType<1 || *pEType>5 ) return SQLITE_CORRUPT_BKPT;
+ return SQLITE_OK;
+}
+
+#else /* if defined SQLITE_OMIT_AUTOVACUUM */
+ #define ptrmapPut(w,x,y,z,rc)
+ #define ptrmapGet(w,x,y,z) SQLITE_OK
+ #define ptrmapPutOvflPtr(x, y, rc)
+#endif
+
+/*
+** Given a btree page and a cell index (0 means the first cell on
+** the page, 1 means the second cell, and so forth) return a pointer
+** to the cell content.
+**
+** findCellPastPtr() does the same except it skips past the initial
+** 4-byte child pointer found on interior pages, if there is one.
+**
+** This routine works only for pages that do not contain overflow cells.
+*/
+#define findCell(P,I) \
+ ((P)->aData + ((P)->maskPage & get2byteAligned(&(P)->aCellIdx[2*(I)])))
+#define findCellPastPtr(P,I) \
+ ((P)->aDataOfst + ((P)->maskPage & get2byteAligned(&(P)->aCellIdx[2*(I)])))
+
+
+/*
+** This is common tail processing for btreeParseCellPtr() and
+** btreeParseCellPtrIndex() for the case when the cell does not fit entirely
+** on a single B-tree page. Make necessary adjustments to the CellInfo
+** structure.
+*/
+static SQLITE_NOINLINE void btreeParseCellAdjustSizeForOverflow(
+ MemPage *pPage, /* Page containing the cell */
+ u8 *pCell, /* Pointer to the cell text. */
+ CellInfo *pInfo /* Fill in this structure */
+){
+ /* If the payload will not fit completely on the local page, we have
+ ** to decide how much to store locally and how much to spill onto
+ ** overflow pages. The strategy is to minimize the amount of unused
+ ** space on overflow pages while keeping the amount of local storage
+ ** in between minLocal and maxLocal.
+ **
+ ** Warning: changing the way overflow payload is distributed in any
+ ** way will result in an incompatible file format.
+ */
+ int minLocal; /* Minimum amount of payload held locally */
+ int maxLocal; /* Maximum amount of payload held locally */
+ int surplus; /* Overflow payload available for local storage */
+
+ minLocal = pPage->minLocal;
+ maxLocal = pPage->maxLocal;
+ surplus = minLocal + (pInfo->nPayload - minLocal)%(pPage->pBt->usableSize-4);
+ testcase( surplus==maxLocal );
+ testcase( surplus==maxLocal+1 );
+ if( surplus <= maxLocal ){
+ pInfo->nLocal = (u16)surplus;
+ }else{
+ pInfo->nLocal = (u16)minLocal;
+ }
+ pInfo->nSize = (u16)(&pInfo->pPayload[pInfo->nLocal] - pCell) + 4;
+}
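
/* Illustrative sketch (not part of this patch): the local-payload decision
** above on bare integers, using hypothetical limits (minLocal==100,
** maxLocal==900, usableSize==1024, so each overflow page carries
** usableSize-4==1020 bytes of payload). */
static int localPayloadDemo(int nPayload, int minLocal, int maxLocal,
                            int usableSize){
  int surplus = minLocal + (nPayload - minLocal) % (usableSize - 4);
  return (surplus<=maxLocal) ? surplus : minLocal;
}
/* localPayloadDemo(1200,100,900,1024)==180: 180 bytes stay local and the
** remaining 1020 bytes exactly fill one overflow page.
** localPayloadDemo(5000,100,900,1024)==100: the surplus (920) would exceed
** maxLocal, so only minLocal bytes are kept locally. */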
+
+/*
+** The following routines are implementations of the MemPage.xParseCell()
+** method.
+**
+** Parse a cell content block and fill in the CellInfo structure.
+**
+** btreeParseCellPtr() => table btree leaf nodes
+** btreeParseCellNoPayload() => table btree internal nodes
+** btreeParseCellPtrIndex() => index btree nodes
+**
+** There is also a wrapper function btreeParseCell() that works for
+** all MemPage types and that references the cell by index rather than
+** by pointer.
+*/
+static void btreeParseCellPtrNoPayload(
+ MemPage *pPage, /* Page containing the cell */
+ u8 *pCell, /* Pointer to the cell text. */
+ CellInfo *pInfo /* Fill in this structure */
+){
+ assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+ assert( pPage->leaf==0 );
+ assert( pPage->childPtrSize==4 );
+#ifndef SQLITE_DEBUG
+ UNUSED_PARAMETER(pPage);
+#endif
+ pInfo->nSize = 4 + getVarint(&pCell[4], (u64*)&pInfo->nKey);
+ pInfo->nPayload = 0;
+ pInfo->nLocal = 0;
+ pInfo->pPayload = 0;
+ return;
+}
+static void btreeParseCellPtr(
+ MemPage *pPage, /* Page containing the cell */
+ u8 *pCell, /* Pointer to the cell text. */
+ CellInfo *pInfo /* Fill in this structure */
+){
+ u8 *pIter; /* For scanning through pCell */
+ u32 nPayload; /* Number of bytes of cell payload */
+ u64 iKey; /* Extracted Key value */
+
+ assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+ assert( pPage->leaf==0 || pPage->leaf==1 );
+ assert( pPage->intKeyLeaf );
+ assert( pPage->childPtrSize==0 );
+ pIter = pCell;
+
+ /* The next block of code is equivalent to:
+ **
+ ** pIter += getVarint32(pIter, nPayload);
+ **
+ ** The code is inlined to avoid a function call.
+ */
+ nPayload = *pIter;
+ if( nPayload>=0x80 ){
+ u8 *pEnd = &pIter[8];
+ nPayload &= 0x7f;
+ do{
+ nPayload = (nPayload<<7) | (*++pIter & 0x7f);
+ }while( (*pIter)>=0x80 && pIter<pEnd );
+ }
+ pIter++;
+
+ /* The next block of code is equivalent to:
+ **
+ ** pIter += getVarint(pIter, (u64*)&pInfo->nKey);
+ **
+ ** The code is inlined to avoid a function call.
+ */
+ iKey = *pIter;
+ if( iKey>=0x80 ){
+ u8 *pEnd = &pIter[7];
+ iKey &= 0x7f;
+ while(1){
+ iKey = (iKey<<7) | (*++pIter & 0x7f);
+ if( (*pIter)<0x80 ) break;
+ if( pIter>=pEnd ){
+ iKey = (iKey<<8) | *++pIter;
+ break;
+ }
+ }
+ }
+ pIter++;
+
+ pInfo->nKey = *(i64*)&iKey;
+ pInfo->nPayload = nPayload;
+ pInfo->pPayload = pIter;
+ testcase( nPayload==pPage->maxLocal );
+ testcase( nPayload==pPage->maxLocal+1 );
+ if( nPayload<=pPage->maxLocal ){
+ /* This is the (easy) common case where the entire payload fits
+ ** on the local page. No overflow is required.
+ */
+ pInfo->nSize = nPayload + (u16)(pIter - pCell);
+ if( pInfo->nSize<4 ) pInfo->nSize = 4;
+ pInfo->nLocal = (u16)nPayload;
+ }else{
+ btreeParseCellAdjustSizeForOverflow(pPage, pCell, pInfo);
+ }
+}
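
/* Illustrative sketch (not part of this patch): a standalone decoder for the
** variable-length integer format that the two inlined loops above read: a
** big-endian base-128 encoding where the high bit of each byte means "more
** bytes follow" and a ninth byte, if reached, contributes all 8 of its bits.
** The function name is hypothetical; it returns the number of bytes consumed
** and stores the decoded value in *pVal. */
#include <stdint.h>
static int varintDemoGet(const unsigned char *p, uint64_t *pVal){
  uint64_t v = 0;
  int i;
  for(i=0; i<8; i++){
    v = (v<<7) | (p[i] & 0x7f);
    if( (p[i] & 0x80)==0 ){
      *pVal = v;
      return i+1;
    }
  }
  *pVal = (v<<8) | p[8];   /* ninth byte supplies all 8 bits */
  return 9;
}
/* Example: the single byte 0x7f decodes to 127; the two bytes 0x81 0x00
** decode to 128. */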
+static void btreeParseCellPtrIndex(
+ MemPage *pPage, /* Page containing the cell */
+ u8 *pCell, /* Pointer to the cell text. */
+ CellInfo *pInfo /* Fill in this structure */
+){
+ u8 *pIter; /* For scanning through pCell */
+ u32 nPayload; /* Number of bytes of cell payload */
+
+ assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+ assert( pPage->leaf==0 || pPage->leaf==1 );
+ assert( pPage->intKeyLeaf==0 );
+ pIter = pCell + pPage->childPtrSize;
+ nPayload = *pIter;
+ if( nPayload>=0x80 ){
+ u8 *pEnd = &pIter[8];
+ nPayload &= 0x7f;
+ do{
+ nPayload = (nPayload<<7) | (*++pIter & 0x7f);
+ }while( *(pIter)>=0x80 && pIter<pEnd );
+ }
+ pIter++;
+ pInfo->nKey = nPayload;
+ pInfo->nPayload = nPayload;
+ pInfo->pPayload = pIter;
+ testcase( nPayload==pPage->maxLocal );
+ testcase( nPayload==pPage->maxLocal+1 );
+ if( nPayload<=pPage->maxLocal ){
+ /* This is the (easy) common case where the entire payload fits
+ ** on the local page. No overflow is required.
+ */
+ pInfo->nSize = nPayload + (u16)(pIter - pCell);
+ if( pInfo->nSize<4 ) pInfo->nSize = 4;
+ pInfo->nLocal = (u16)nPayload;
+ }else{
+ btreeParseCellAdjustSizeForOverflow(pPage, pCell, pInfo);
+ }
+}
+static void btreeParseCell(
+ MemPage *pPage, /* Page containing the cell */
+ int iCell, /* The cell index. First cell is 0 */
+ CellInfo *pInfo /* Fill in this structure */
+){
+ pPage->xParseCell(pPage, findCell(pPage, iCell), pInfo);
+}
+
+/*
+** The following routines are implementations of the MemPage.xCellSize
+** method.
+**
+** Compute the total number of bytes that a Cell needs in the cell
+** data area of the btree-page. The returned size includes the cell
+** data header and the local payload, but not any overflow page or
+** the space used by the cell pointer.
+**
+** cellSizePtrNoPayload() => table internal nodes
+** cellSizePtr() => all index nodes & table leaf nodes
+*/
+static u16 cellSizePtr(MemPage *pPage, u8 *pCell){
+ u8 *pIter = pCell + pPage->childPtrSize; /* For looping over bytes of pCell */
+ u8 *pEnd; /* End mark for a varint */
+ u32 nSize; /* Size value to return */
+
+#ifdef SQLITE_DEBUG
+ /* The value returned by this function should always be the same as
+ ** the (CellInfo.nSize) value found by doing a full parse of the
+ ** cell. If SQLITE_DEBUG is defined, an assert() at the bottom of
+ ** this function verifies that this invariant is not violated. */
+ CellInfo debuginfo;
+ pPage->xParseCell(pPage, pCell, &debuginfo);
+#endif
+
+ nSize = *pIter;
+ if( nSize>=0x80 ){
+ pEnd = &pIter[8];
+ nSize &= 0x7f;
+ do{
+ nSize = (nSize<<7) | (*++pIter & 0x7f);
+ }while( *(pIter)>=0x80 && pIter<pEnd );
+ }
+ pIter++;
+ if( pPage->intKey ){
+ /* pIter now points at the 64-bit integer key value, a variable length
+ ** integer. The following block moves pIter to point at the first byte
+ ** past the end of the key value. */
+ pEnd = &pIter[9];
+ while( (*pIter++)&0x80 && pIter<pEnd );
+ }
+ testcase( nSize==pPage->maxLocal );
+ testcase( nSize==pPage->maxLocal+1 );
+ if( nSize<=pPage->maxLocal ){
+ nSize += (u32)(pIter - pCell);
+ if( nSize<4 ) nSize = 4;
+ }else{
+ int minLocal = pPage->minLocal;
+ nSize = minLocal + (nSize - minLocal) % (pPage->pBt->usableSize - 4);
+ testcase( nSize==pPage->maxLocal );
+ testcase( nSize==pPage->maxLocal+1 );
+ if( nSize>pPage->maxLocal ){
+ nSize = minLocal;
+ }
+ nSize += 4 + (u16)(pIter - pCell);
+ }
+ assert( nSize==debuginfo.nSize || CORRUPT_DB );
+ return (u16)nSize;
+}
+static u16 cellSizePtrNoPayload(MemPage *pPage, u8 *pCell){
+ u8 *pIter = pCell + 4; /* For looping over bytes of pCell */
+ u8 *pEnd; /* End mark for a varint */
+
+#ifdef SQLITE_DEBUG
+ /* The value returned by this function should always be the same as
+ ** the (CellInfo.nSize) value found by doing a full parse of the
+ ** cell. If SQLITE_DEBUG is defined, an assert() at the bottom of
+ ** this function verifies that this invariant is not violated. */
+ CellInfo debuginfo;
+ pPage->xParseCell(pPage, pCell, &debuginfo);
+#else
+ UNUSED_PARAMETER(pPage);
+#endif
+
+ assert( pPage->childPtrSize==4 );
+ pEnd = pIter + 9;
+ while( (*pIter++)&0x80 && pIter<pEnd );
+ assert( debuginfo.nSize==(u16)(pIter - pCell) || CORRUPT_DB );
+ return (u16)(pIter - pCell);
+}
+
+
+#ifdef SQLITE_DEBUG
+/* This variation on cellSizePtr() is used inside of assert() statements
+** only. */
+static u16 cellSize(MemPage *pPage, int iCell){
+ return pPage->xCellSize(pPage, findCell(pPage, iCell));
+}
+#endif
+
+#ifndef SQLITE_OMIT_AUTOVACUUM
+/*
+** If the cell pCell, part of page pPage contains a pointer
+** to an overflow page, insert an entry into the pointer-map
+** for the overflow page.
+*/
+static void ptrmapPutOvflPtr(MemPage *pPage, u8 *pCell, int *pRC){
+ CellInfo info;
+ if( *pRC ) return;
+ assert( pCell!=0 );
+ pPage->xParseCell(pPage, pCell, &info);
+ if( info.nLocal<info.nPayload ){
+ Pgno ovfl = get4byte(&pCell[info.nSize-4]);
+ ptrmapPut(pPage->pBt, ovfl, PTRMAP_OVERFLOW1, pPage->pgno, pRC);
+ }
+}
+#endif
+
+
+/*
+** Defragment the given page. All cells are moved to the
+** end of the page and all free space is collected into one
+** big FreeBlk that lies between the end of the header and
+** cell pointer array and the start of the cell content area.
+**
+** EVIDENCE-OF: R-44582-60138 SQLite may from time to time reorganize a
+** b-tree page so that there are no freeblocks or fragment bytes, all
+** unused bytes are contained in the unallocated space region, and all
+** cells are packed tightly at the end of the page.
+*/
+static int defragmentPage(MemPage *pPage){
+ int i; /* Loop counter */
+ int pc; /* Address of the i-th cell */
+ int hdr; /* Offset to the page header */
+ int size; /* Size of a cell */
+ int usableSize; /* Number of usable bytes on a page */
+ int cellOffset; /* Offset to the cell pointer array */
+ int cbrk; /* Offset to the cell content area */
+ int nCell; /* Number of cells on the page */
+ unsigned char *data; /* The page data */
+ unsigned char *temp; /* Temp area for cell content */
+ unsigned char *src; /* Source of content */
+ int iCellFirst; /* First allowable cell index */
+ int iCellLast; /* Last possible cell index */
+
+
+ assert( sqlite3PagerIswriteable(pPage->pDbPage) );
+ assert( pPage->pBt!=0 );
+ assert( pPage->pBt->usableSize <= SQLITE_MAX_PAGE_SIZE );
+ assert( pPage->nOverflow==0 );
+ assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+ temp = 0;
+ src = data = pPage->aData;
+ hdr = pPage->hdrOffset;
+ cellOffset = pPage->cellOffset;
+ nCell = pPage->nCell;
+ assert( nCell==get2byte(&data[hdr+3]) );
+ usableSize = pPage->pBt->usableSize;
+ cbrk = usableSize;
+ iCellFirst = cellOffset + 2*nCell;
+ iCellLast = usableSize - 4;
+ for(i=0; i<nCell; i++){
+ u8 *pAddr; /* The i-th cell pointer */
+ pAddr = &data[cellOffset + i*2];
+ pc = get2byte(pAddr);
+ testcase( pc==iCellFirst );
+ testcase( pc==iCellLast );
+ /* These conditions have already been verified in btreeInitPage()
+ ** if PRAGMA cell_size_check=ON.
+ */
+ if( pc<iCellFirst || pc>iCellLast ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+ assert( pc>=iCellFirst && pc<=iCellLast );
+ size = pPage->xCellSize(pPage, &src[pc]);
+ cbrk -= size;
+ if( cbrk<iCellFirst || pc+size>usableSize ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+ assert( cbrk+size<=usableSize && cbrk>=iCellFirst );
+ testcase( cbrk+size==usableSize );
+ testcase( pc+size==usableSize );
+ put2byte(pAddr, cbrk);
+ if( temp==0 ){
+ int x;
+ if( cbrk==pc ) continue;
+ temp = sqlite3PagerTempSpace(pPage->pBt->pPager);
+ x = get2byte(&data[hdr+5]);
+ memcpy(&temp[x], &data[x], (cbrk+size) - x);
+ src = temp;
+ }
+ memcpy(&data[cbrk], &src[pc], size);
+ }
+ assert( cbrk>=iCellFirst );
+ put2byte(&data[hdr+5], cbrk);
+ data[hdr+1] = 0;
+ data[hdr+2] = 0;
+ data[hdr+7] = 0;
+ memset(&data[iCellFirst], 0, cbrk-iCellFirst);
+ assert( sqlite3PagerIswriteable(pPage->pDbPage) );
+ if( cbrk-iCellFirst!=pPage->nFree ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Search the free-list on page pPg for space to store a cell nByte bytes in
+** size. If one can be found, return a pointer to the space and remove it
+** from the free-list.
+**
+** If no suitable space can be found on the free-list, return NULL.
+**
+** This function may detect corruption within pPg. If corruption is
+** detected then *pRc is set to SQLITE_CORRUPT and NULL is returned.
+**
+** Slots on the free list that are between 1 and 3 bytes larger than nByte
+** will be ignored if adding the extra space to the fragmentation count
+** causes the fragmentation count to exceed 60.
+*/
+static u8 *pageFindSlot(MemPage *pPg, int nByte, int *pRc){
+ const int hdr = pPg->hdrOffset;
+ u8 * const aData = pPg->aData;
+ int iAddr = hdr + 1;
+ int pc = get2byte(&aData[iAddr]);
+ int x;
+ int usableSize = pPg->pBt->usableSize;
+
+ assert( pc>0 );
+ do{
+ int size; /* Size of the free slot */
+ /* EVIDENCE-OF: R-06866-39125 Freeblocks are always connected in order of
+ ** increasing offset. */
+ if( pc>usableSize-4 || pc<iAddr+4 ){
+ *pRc = SQLITE_CORRUPT_BKPT;
+ return 0;
+ }
+ /* EVIDENCE-OF: R-22710-53328 The third and fourth bytes of each
+ ** freeblock form a big-endian integer which is the size of the freeblock
+ ** in bytes, including the 4-byte header. */
+ size = get2byte(&aData[pc+2]);
+ if( (x = size - nByte)>=0 ){
+ testcase( x==4 );
+ testcase( x==3 );
+ if( pc < pPg->cellOffset+2*pPg->nCell || size+pc > usableSize ){
+ *pRc = SQLITE_CORRUPT_BKPT;
+ return 0;
+ }else if( x<4 ){
+ /* EVIDENCE-OF: R-11498-58022 In a well-formed b-tree page, the total
+ ** number of bytes in fragments may not exceed 60. */
+ if( aData[hdr+7]>57 ) return 0;
+
+ /* Remove the slot from the free-list. Update the number of
+ ** fragmented bytes within the page. */
+ memcpy(&aData[iAddr], &aData[pc], 2);
+ aData[hdr+7] += (u8)x;
+ }else{
+ /* The slot remains on the free-list. Reduce its size to account
+ ** for the portion used by the new allocation. */
+ put2byte(&aData[pc+2], x);
+ }
+ return &aData[pc + x];
+ }
+ iAddr = pc;
+ pc = get2byte(&aData[pc]);
+ }while( pc );
+
+ return 0;
+}
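
/* Illustrative sketch (not part of this patch): walking the freeblock chain
** that pageFindSlot() searches, over a raw page image. aPage is assumed to
** hold the full page and hdr is the header offset (100 on page 1, else 0).
** Per the format comments above, each freeblock begins with a 2-byte
** big-endian offset of the next freeblock (0 ends the list) followed by a
** 2-byte big-endian size that includes the 4-byte freeblock header. Unlike
** the real code, no corruption checks are performed. */
static int demoGet2(const unsigned char *p){ return (p[0]<<8) | p[1]; }

static int demoTotalFreeblockBytes(const unsigned char *aPage, int hdr){
  int nFree = 0;
  int pc = demoGet2(&aPage[hdr+1]);    /* offset of the first freeblock */
  while( pc ){
    nFree += demoGet2(&aPage[pc+2]);   /* size of this freeblock */
    pc = demoGet2(&aPage[pc]);         /* offset of the next freeblock */
  }
  return nFree;
}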
+
+/*
+** Allocate nByte bytes of space from within the B-Tree page passed
+** as the first argument. Write into *pIdx the index into pPage->aData[]
+** of the first byte of allocated space. Return either SQLITE_OK or
+** an error code (usually SQLITE_CORRUPT).
+**
+** The caller guarantees that there is sufficient space to make the
+** allocation. This routine might need to defragment in order to bring
+** all the space together, however. This routine will avoid using
+** the first two bytes past the cell pointer area since presumably this
+** allocation is being made in order to insert a new cell, so we will
+** also end up needing a new cell pointer.
+*/
+static int allocateSpace(MemPage *pPage, int nByte, int *pIdx){
+ const int hdr = pPage->hdrOffset; /* Local cache of pPage->hdrOffset */
+ u8 * const data = pPage->aData; /* Local cache of pPage->aData */
+ int top; /* First byte of cell content area */
+ int rc = SQLITE_OK; /* Integer return code */
+ int gap; /* First byte of gap between cell pointers and cell content */
+
+ assert( sqlite3PagerIswriteable(pPage->pDbPage) );
+ assert( pPage->pBt );
+ assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+ assert( nByte>=0 ); /* Minimum cell size is 4 */
+ assert( pPage->nFree>=nByte );
+ assert( pPage->nOverflow==0 );
+ assert( nByte < (int)(pPage->pBt->usableSize-8) );
+
+ assert( pPage->cellOffset == hdr + 12 - 4*pPage->leaf );
+ gap = pPage->cellOffset + 2*pPage->nCell;
+ assert( gap<=65536 );
+ /* EVIDENCE-OF: R-29356-02391 If the database uses a 65536-byte page size
+ ** and the reserved space is zero (the usual value for reserved space)
+ ** then the cell content offset of an empty page wants to be 65536.
+ ** However, that integer is too large to be stored in a 2-byte unsigned
+ ** integer, so a value of 0 is used in its place. */
+ top = get2byte(&data[hdr+5]);
+ assert( top<=(int)pPage->pBt->usableSize ); /* Prevented by getAndInitPage() */
+ if( gap>top ){
+ if( top==0 && pPage->pBt->usableSize==65536 ){
+ top = 65536;
+ }else{
+ return SQLITE_CORRUPT_BKPT;
+ }
+ }
+
+ /* If there is enough space between gap and top for one more cell pointer
+ ** array entry offset, and if the freelist is not empty, then search the
+ ** freelist looking for a free slot big enough to satisfy the request.
+ */
+ testcase( gap+2==top );
+ testcase( gap+1==top );
+ testcase( gap==top );
+ if( (data[hdr+2] || data[hdr+1]) && gap+2<=top ){
+ u8 *pSpace = pageFindSlot(pPage, nByte, &rc);
+ if( pSpace ){
+ assert( pSpace>=data && (pSpace - data)<65536 );
+ *pIdx = (int)(pSpace - data);
+ return SQLITE_OK;
+ }else if( rc ){
+ return rc;
+ }
+ }
+
+ /* The request could not be fulfilled using a freelist slot. Check
+ ** to see if defragmentation is necessary.
+ */
+ testcase( gap+2+nByte==top );
+ if( gap+2+nByte>top ){
+ assert( pPage->nCell>0 || CORRUPT_DB );
+ rc = defragmentPage(pPage);
+ if( rc ) return rc;
+ top = get2byteNotZero(&data[hdr+5]);
+ assert( gap+nByte<=top );
+ }
+
+
+ /* Allocate memory from the gap in between the cell pointer array
+ ** and the cell content area. The btreeInitPage() call has already
+ ** validated the freelist. Given that the freelist is valid, there
+ ** is no way that the allocation can extend off the end of the page.
+ ** The assert() below verifies the previous sentence.
+ */
+ top -= nByte;
+ put2byte(&data[hdr+5], top);
+ assert( top+nByte <= (int)pPage->pBt->usableSize );
+ *pIdx = top;
+ return SQLITE_OK;
+}
+
+/*
+** Return a section of the pPage->aData to the freelist.
+** The first byte of the new free block is pPage->aData[iStart]
+** and the size of the block is iSize bytes.
+**
+** Adjacent freeblocks are coalesced.
+**
+** Note that even though the freeblock list was checked by btreeInitPage(),
+** that routine will not detect overlap between cells or freeblocks. Nor
+** does it detect cells or freeblocks that encroach into the reserved bytes
+** at the end of the page. So do additional corruption checks inside this
+** routine and return SQLITE_CORRUPT if any problems are found.
+*/
+static int freeSpace(MemPage *pPage, u16 iStart, u16 iSize){
+ u16 iPtr; /* Address of ptr to next freeblock */
+ u16 iFreeBlk; /* Address of the next freeblock */
+ u8 hdr; /* Offset of the page header. 0 or 100 */
+ u8 nFrag = 0; /* Reduction in fragmentation */
+ u16 iOrigSize = iSize; /* Original value of iSize */
+ u32 iLast = pPage->pBt->usableSize-4; /* Largest possible freeblock offset */
+ u32 iEnd = iStart + iSize; /* First byte past the iStart buffer */
+ unsigned char *data = pPage->aData; /* Page content */
+
+ assert( pPage->pBt!=0 );
+ assert( sqlite3PagerIswriteable(pPage->pDbPage) );
+ assert( CORRUPT_DB || iStart>=pPage->hdrOffset+6+pPage->childPtrSize );
+ assert( CORRUPT_DB || iEnd <= pPage->pBt->usableSize );
+ assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+ assert( iSize>=4 ); /* Minimum cell size is 4 */
+ assert( iStart<=iLast );
+
+ /* Overwrite deleted information with zeros when the secure_delete
+ ** option is enabled */
+ if( pPage->pBt->btsFlags & BTS_SECURE_DELETE ){
+ memset(&data[iStart], 0, iSize);
+ }
+
+ /* The list of freeblocks must be in ascending order. Find the
+ ** spot on the list where iStart should be inserted.
+ */
+ hdr = pPage->hdrOffset;
+ iPtr = hdr + 1;
+ if( data[iPtr+1]==0 && data[iPtr]==0 ){
+ iFreeBlk = 0; /* Shortcut for the case when the freelist is empty */
+ }else{
+ while( (iFreeBlk = get2byte(&data[iPtr]))<iStart ){
+ if( iFreeBlk<iPtr+4 ){
+ if( iFreeBlk==0 ) break;
+ return SQLITE_CORRUPT_BKPT;
+ }
+ iPtr = iFreeBlk;
+ }
+ if( iFreeBlk>iLast ) return SQLITE_CORRUPT_BKPT;
+ assert( iFreeBlk>iPtr || iFreeBlk==0 );
+
+ /* At this point:
+ ** iFreeBlk: First freeblock after iStart, or zero if none
+ ** iPtr: The address of a pointer to iFreeBlk
+ **
+ ** Check to see if iFreeBlk should be coalesced onto the end of iStart.
+ */
+ if( iFreeBlk && iEnd+3>=iFreeBlk ){
+ nFrag = iFreeBlk - iEnd;
+ if( iEnd>iFreeBlk ) return SQLITE_CORRUPT_BKPT;
+ iEnd = iFreeBlk + get2byte(&data[iFreeBlk+2]);
+ if( iEnd > pPage->pBt->usableSize ) return SQLITE_CORRUPT_BKPT;
+ iSize = iEnd - iStart;
+ iFreeBlk = get2byte(&data[iFreeBlk]);
+ }
+
+ /* If iPtr is another freeblock (that is, if iPtr is not the freelist
+ ** pointer in the page header) then check to see if iStart should be
+ ** coalesced onto the end of iPtr.
+ */
+ if( iPtr>hdr+1 ){
+ int iPtrEnd = iPtr + get2byte(&data[iPtr+2]);
+ if( iPtrEnd+3>=iStart ){
+ if( iPtrEnd>iStart ) return SQLITE_CORRUPT_BKPT;
+ nFrag += iStart - iPtrEnd;
+ iSize = iEnd - iPtr;
+ iStart = iPtr;
+ }
+ }
+ if( nFrag>data[hdr+7] ) return SQLITE_CORRUPT_BKPT;
+ data[hdr+7] -= nFrag;
+ }
+ if( iStart==get2byte(&data[hdr+5]) ){
+ /* The new freeblock is at the beginning of the cell content area,
+ ** so just extend the cell content area rather than create another
+ ** freelist entry */
+ if( iPtr!=hdr+1 ) return SQLITE_CORRUPT_BKPT;
+ put2byte(&data[hdr+1], iFreeBlk);
+ put2byte(&data[hdr+5], iEnd);
+ }else{
+ /* Insert the new freeblock into the freelist */
+ put2byte(&data[iPtr], iStart);
+ put2byte(&data[iStart], iFreeBlk);
+ put2byte(&data[iStart+2], iSize);
+ }
+ pPage->nFree += iOrigSize;
+ return SQLITE_OK;
+}
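+
+/* The coalescing rule used by freeSpace() above, shown in isolation: two
+** freeblocks are merged whenever the gap between them is at most 3 bytes,
+** and the gap is charged to the fragmented-bytes counter at byte 7 of the
+** page header. wouldCoalesce() is a hypothetical helper kept compiled out;
+** it is only a sketch of that rule, not code used by the library.
+*/
+#if 0
+static int wouldCoalesce(
+  unsigned iPrev, unsigned szPrev,    /* Earlier freeblock: start and size */
+  unsigned iNext, unsigned szNext,    /* Later freeblock: start and size */
+  unsigned *pszMerged,                /* OUT: size of the merged block */
+  unsigned *pnFrag                    /* OUT: bytes added to fragment count */
+){
+  unsigned iPrevEnd = iPrev + szPrev;    /* First byte past the earlier block */
+  if( iPrevEnd>iNext ) return -1;        /* Overlapping blocks: corruption */
+  if( iPrevEnd+3<iNext ) return 0;       /* Gap of 4 or more bytes: keep both */
+  *pnFrag = iNext - iPrevEnd;            /* 0..3 bytes become fragments */
+  *pszMerged = (iNext + szNext) - iPrev; /* Single block spanning both */
+  return 1;
+}
+#endif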
+
+/*
+** Decode the flags byte (the first byte of the header) for a page
+** and initialize fields of the MemPage structure accordingly.
+**
+** Only the following combinations are supported. Anything different
+** indicates a corrupt database file:
+**
+** PTF_ZERODATA
+** PTF_ZERODATA | PTF_LEAF
+** PTF_LEAFDATA | PTF_INTKEY
+** PTF_LEAFDATA | PTF_INTKEY | PTF_LEAF
+*/
+static int decodeFlags(MemPage *pPage, int flagByte){
+ BtShared *pBt; /* A copy of pPage->pBt */
+
+ assert( pPage->hdrOffset==(pPage->pgno==1 ? 100 : 0) );
+ assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+ pPage->leaf = (u8)(flagByte>>3); assert( PTF_LEAF == 1<<3 );
+ flagByte &= ~PTF_LEAF;
+ pPage->childPtrSize = 4-4*pPage->leaf;
+ pPage->xCellSize = cellSizePtr;
+ pBt = pPage->pBt;
+ if( flagByte==(PTF_LEAFDATA | PTF_INTKEY) ){
+ /* EVIDENCE-OF: R-07291-35328 A value of 5 (0x05) means the page is an
+ ** interior table b-tree page. */
+ assert( (PTF_LEAFDATA|PTF_INTKEY)==5 );
+ /* EVIDENCE-OF: R-26900-09176 A value of 13 (0x0d) means the page is a
+ ** leaf table b-tree page. */
+ assert( (PTF_LEAFDATA|PTF_INTKEY|PTF_LEAF)==13 );
+ pPage->intKey = 1;
+ if( pPage->leaf ){
+ pPage->intKeyLeaf = 1;
+ pPage->xParseCell = btreeParseCellPtr;
+ }else{
+ pPage->intKeyLeaf = 0;
+ pPage->xCellSize = cellSizePtrNoPayload;
+ pPage->xParseCell = btreeParseCellPtrNoPayload;
+ }
+ pPage->maxLocal = pBt->maxLeaf;
+ pPage->minLocal = pBt->minLeaf;
+ }else if( flagByte==PTF_ZERODATA ){
+ /* EVIDENCE-OF: R-43316-37308 A value of 2 (0x02) means the page is an
+ ** interior index b-tree page. */
+ assert( (PTF_ZERODATA)==2 );
+ /* EVIDENCE-OF: R-59615-42828 A value of 10 (0x0a) means the page is a
+ ** leaf index b-tree page. */
+ assert( (PTF_ZERODATA|PTF_LEAF)==10 );
+ pPage->intKey = 0;
+ pPage->intKeyLeaf = 0;
+ pPage->xParseCell = btreeParseCellPtrIndex;
+ pPage->maxLocal = pBt->maxLocal;
+ pPage->minLocal = pBt->minLocal;
+ }else{
+ /* EVIDENCE-OF: R-47608-56469 Any other value for the b-tree page type is
+ ** an error. */
+ return SQLITE_CORRUPT_BKPT;
+ }
+ pPage->max1bytePayload = pBt->max1bytePayload;
+ return SQLITE_OK;
+}
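+
+/* The four legal page-type values accepted by decodeFlags(), restated as a
+** compiled-out lookup. btreePageTypeName() is a hypothetical helper used
+** only to illustrate the mapping; any other flag byte means corruption.
+*/
+#if 0
+static const char *btreePageTypeName(int flagByte){
+  switch( flagByte ){
+    case 2:  return "interior index b-tree page"; /* PTF_ZERODATA */
+    case 5:  return "interior table b-tree page"; /* PTF_LEAFDATA|PTF_INTKEY */
+    case 10: return "leaf index b-tree page";     /* PTF_ZERODATA|PTF_LEAF */
+    case 13: return "leaf table b-tree page";     /* PTF_LEAFDATA|PTF_INTKEY|PTF_LEAF */
+    default: return "corrupt page type";
+  }
+}
+#endif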
+
+/*
+** Initialize the auxiliary information for a disk block.
+**
+** Return SQLITE_OK on success. If we see that the page does
+** not contain a well-formed database page, then return
+** SQLITE_CORRUPT. Note that a return of SQLITE_OK does not
+** guarantee that the page is well-formed. It only shows that
+** we failed to detect any corruption.
+*/
+static int btreeInitPage(MemPage *pPage){
+
+ assert( pPage->pBt!=0 );
+ assert( pPage->pBt->db!=0 );
+ assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+ assert( pPage->pgno==sqlite3PagerPagenumber(pPage->pDbPage) );
+ assert( pPage == sqlite3PagerGetExtra(pPage->pDbPage) );
+ assert( pPage->aData == sqlite3PagerGetData(pPage->pDbPage) );
+
+ if( !pPage->isInit ){
+ int pc; /* Address of a freeblock within pPage->aData[] */
+ u8 hdr; /* Offset to beginning of page header */
+ u8 *data; /* Equal to pPage->aData */
+ BtShared *pBt; /* The main btree structure */
+ int usableSize; /* Amount of usable space on each page */
+ u16 cellOffset; /* Offset from start of page to first cell pointer */
+ int nFree; /* Number of unused bytes on the page */
+ int top; /* First byte of the cell content area */
+ int iCellFirst; /* First allowable cell or freeblock offset */
+ int iCellLast; /* Last possible cell or freeblock offset */
+
+ pBt = pPage->pBt;
+
+ hdr = pPage->hdrOffset;
+ data = pPage->aData;
+ /* EVIDENCE-OF: R-28594-02890 The one-byte flag at offset 0 indicating
+ ** the b-tree page type. */
+ if( decodeFlags(pPage, data[hdr]) ) return SQLITE_CORRUPT_BKPT;
+ assert( pBt->pageSize>=512 && pBt->pageSize<=65536 );
+ pPage->maskPage = (u16)(pBt->pageSize - 1);
+ pPage->nOverflow = 0;
+ usableSize = pBt->usableSize;
+ pPage->cellOffset = cellOffset = hdr + 8 + pPage->childPtrSize;
+ pPage->aDataEnd = &data[usableSize];
+ pPage->aCellIdx = &data[cellOffset];
+ pPage->aDataOfst = &data[pPage->childPtrSize];
+ /* EVIDENCE-OF: R-58015-48175 The two-byte integer at offset 5 designates
+ ** the start of the cell content area. A zero value for this integer is
+ ** interpreted as 65536. */
+ top = get2byteNotZero(&data[hdr+5]);
+ /* EVIDENCE-OF: R-37002-32774 The two-byte integer at offset 3 gives the
+ ** number of cells on the page. */
+ pPage->nCell = get2byte(&data[hdr+3]);
+ if( pPage->nCell>MX_CELL(pBt) ){
+ /* Too many cells for a single page. The page must be corrupt */
+ return SQLITE_CORRUPT_BKPT;
+ }
+ testcase( pPage->nCell==MX_CELL(pBt) );
+ /* EVIDENCE-OF: R-24089-57979 If a page contains no cells (which is only
+ ** possible for a root page of a table that contains no rows) then the
+ ** offset to the cell content area will equal the page size minus the
+ ** bytes of reserved space. */
+ assert( pPage->nCell>0 || top==usableSize || CORRUPT_DB );
+
+ /* A malformed database page might cause us to read past the end
+ ** of page when parsing a cell.
+ **
+ ** The following block of code checks early to see if a cell extends
+ ** past the end of a page boundary and causes SQLITE_CORRUPT to be
+ ** returned if it does.
+ */
+ iCellFirst = cellOffset + 2*pPage->nCell;
+ iCellLast = usableSize - 4;
+ if( pBt->db->flags & SQLITE_CellSizeCk ){
+ int i; /* Index into the cell pointer array */
+ int sz; /* Size of a cell */
+
+ if( !pPage->leaf ) iCellLast--;
+ for(i=0; i<pPage->nCell; i++){
+ pc = get2byteAligned(&data[cellOffset+i*2]);
+ testcase( pc==iCellFirst );
+ testcase( pc==iCellLast );
+ if( pc<iCellFirst || pc>iCellLast ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+ sz = pPage->xCellSize(pPage, &data[pc]);
+ testcase( pc+sz==usableSize );
+ if( pc+sz>usableSize ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+ }
+ if( !pPage->leaf ) iCellLast++;
+ }
+
+ /* Compute the total free space on the page
+ ** EVIDENCE-OF: R-23588-34450 The two-byte integer at offset 1 gives the
+ ** start of the first freeblock on the page, or is zero if there are no
+ ** freeblocks. */
+ pc = get2byte(&data[hdr+1]);
+ nFree = data[hdr+7] + top; /* Init nFree to non-freeblock free space */
+ if( pc>0 ){
+ u32 next, size;
+ if( pc<iCellFirst ){
+ /* EVIDENCE-OF: R-55530-52930 In a well-formed b-tree page, there will
+ ** always be at least one cell before the first freeblock.
+ */
+ return SQLITE_CORRUPT_BKPT;
+ }
+ while( 1 ){
+ if( pc>iCellLast ){
+ return SQLITE_CORRUPT_BKPT; /* Freeblock off the end of the page */
+ }
+ next = get2byte(&data[pc]);
+ size = get2byte(&data[pc+2]);
+ nFree = nFree + size;
+ if( next<=pc+size+3 ) break;
+ pc = next;
+ }
+ if( next>0 ){
+ return SQLITE_CORRUPT_BKPT; /* Freeblock not in ascending order */
+ }
+ if( pc+size>(unsigned int)usableSize ){
+ return SQLITE_CORRUPT_BKPT; /* Last freeblock extends past page end */
+ }
+ }
+
+ /* At this point, nFree contains the sum of the offset to the start
+ ** of the cell-content area plus the number of free bytes within
+ ** the cell-content area. If this is greater than the usable-size
+ ** of the page, then the page must be corrupted. This check also
+ ** serves to verify that the offset to the start of the cell-content
+ ** area, according to the page header, lies within the page.
+ */
+ if( nFree>usableSize ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+ pPage->nFree = (u16)(nFree - iCellFirst);
+ pPage->isInit = 1;
+ }
+ return SQLITE_OK;
+}
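+
+/* Sketch of the page-header fields that btreeInitPage() reads, kept
+** compiled out. Offsets are relative to the header (0 on most pages,
+** 100 on page 1) and multi-byte values are big-endian. BtPageHdr and
+** decodeBtPageHdr() are hypothetical names, not part of the library.
+*/
+#if 0
+typedef struct BtPageHdr BtPageHdr;
+struct BtPageHdr {
+  unsigned char pageType;  /* Byte 0: 2, 5, 10 or 13 */
+  unsigned firstFreeblock; /* Bytes 1-2: 0 if there are no freeblocks */
+  unsigned nCell;          /* Bytes 3-4: number of cells on the page */
+  unsigned cellContent;    /* Bytes 5-6: start of cell content (0 => 65536) */
+  unsigned char nFrag;     /* Byte 7: total fragmented free bytes */
+};
+static void decodeBtPageHdr(const unsigned char *h, BtPageHdr *p){
+  p->pageType       = h[0];
+  p->firstFreeblock = (h[1]<<8) | h[2];
+  p->nCell          = (h[3]<<8) | h[4];
+  p->cellContent    = ((((h[5]<<8) | h[6]) - 1) & 0xffff) + 1; /* 0 -> 65536 */
+  p->nFrag          = h[7];
+}
+#endif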
+
+/*
+** Set up a raw page so that it looks like a database page holding
+** no entries.
+*/
+static void zeroPage(MemPage *pPage, int flags){
+ unsigned char *data = pPage->aData;
+ BtShared *pBt = pPage->pBt;
+ u8 hdr = pPage->hdrOffset;
+ u16 first;
+
+ assert( sqlite3PagerPagenumber(pPage->pDbPage)==pPage->pgno );
+ assert( sqlite3PagerGetExtra(pPage->pDbPage) == (void*)pPage );
+ assert( sqlite3PagerGetData(pPage->pDbPage) == data );
+ assert( sqlite3PagerIswriteable(pPage->pDbPage) );
+ assert( sqlite3_mutex_held(pBt->mutex) );
+ if( pBt->btsFlags & BTS_SECURE_DELETE ){
+ memset(&data[hdr], 0, pBt->usableSize - hdr);
+ }
+ data[hdr] = (char)flags;
+ first = hdr + ((flags&PTF_LEAF)==0 ? 12 : 8);
+ memset(&data[hdr+1], 0, 4);
+ data[hdr+7] = 0;
+ put2byte(&data[hdr+5], pBt->usableSize);
+ pPage->nFree = (u16)(pBt->usableSize - first);
+ decodeFlags(pPage, flags);
+ pPage->cellOffset = first;
+ pPage->aDataEnd = &data[pBt->usableSize];
+ pPage->aCellIdx = &data[first];
+ pPage->aDataOfst = &data[pPage->childPtrSize];
+ pPage->nOverflow = 0;
+ assert( pBt->pageSize>=512 && pBt->pageSize<=65536 );
+ pPage->maskPage = (u16)(pBt->pageSize - 1);
+ pPage->nCell = 0;
+ pPage->isInit = 1;
+}
+
+
+/*
+** Convert a DbPage obtained from the pager into a MemPage used by
+** the btree layer.
+*/
+static MemPage *btreePageFromDbPage(DbPage *pDbPage, Pgno pgno, BtShared *pBt){
+ MemPage *pPage = (MemPage*)sqlite3PagerGetExtra(pDbPage);
+ if( pgno!=pPage->pgno ){
+ pPage->aData = sqlite3PagerGetData(pDbPage);
+ pPage->pDbPage = pDbPage;
+ pPage->pBt = pBt;
+ pPage->pgno = pgno;
+ pPage->hdrOffset = pgno==1 ? 100 : 0;
+ }
+ assert( pPage->aData==sqlite3PagerGetData(pDbPage) );
+ return pPage;
+}
+
+/*
+** Get a page from the pager. Initialize the MemPage.pBt and
+** MemPage.aData elements if needed. See also: btreeGetUnusedPage().
+**
+** If the PAGER_GET_NOCONTENT flag is set, it means that we do not care
+** about the content of the page at this time. So do not go to the disk
+** to fetch the content. Just fill in the content with zeros for now.
+** If in the future we call sqlite3PagerWrite() on this page, that
+** means we have started to be concerned about content and the disk
+** read should occur at that point.
+*/
+static int btreeGetPage(
+ BtShared *pBt, /* The btree */
+ Pgno pgno, /* Number of the page to fetch */
+ MemPage **ppPage, /* Return the page in this parameter */
+ int flags /* PAGER_GET_NOCONTENT or PAGER_GET_READONLY */
+){
+ int rc;
+ DbPage *pDbPage;
+
+ assert( flags==0 || flags==PAGER_GET_NOCONTENT || flags==PAGER_GET_READONLY );
+ assert( sqlite3_mutex_held(pBt->mutex) );
+ rc = sqlite3PagerGet(pBt->pPager, pgno, (DbPage**)&pDbPage, flags);
+ if( rc ) return rc;
+ *ppPage = btreePageFromDbPage(pDbPage, pgno, pBt);
+ return SQLITE_OK;
+}
+
+/*
+** Retrieve a page from the pager cache. If the requested page is not
+** already in the pager cache return NULL. Initialize the MemPage.pBt and
+** MemPage.aData elements if needed.
+*/
+static MemPage *btreePageLookup(BtShared *pBt, Pgno pgno){
+ DbPage *pDbPage;
+ assert( sqlite3_mutex_held(pBt->mutex) );
+ pDbPage = sqlite3PagerLookup(pBt->pPager, pgno);
+ if( pDbPage ){
+ return btreePageFromDbPage(pDbPage, pgno, pBt);
+ }
+ return 0;
+}
+
+/*
+** Return the size of the database file in pages. If there is any kind of
+** error, return ((unsigned int)-1).
+*/
+static Pgno btreePagecount(BtShared *pBt){
+ return pBt->nPage;
+}
+SQLITE_PRIVATE u32 sqlite3BtreeLastPage(Btree *p){
+ assert( sqlite3BtreeHoldsMutex(p) );
+ assert( ((p->pBt->nPage)&0x8000000)==0 );
+ return btreePagecount(p->pBt);
+}
+
+/*
+** Get a page from the pager and initialize it.
+**
+** If pCur!=0 then the page is being fetched as part of a moveToChild()
+** call. Do additional sanity checking on the page in this case.
+** And if the fetch fails, this routine must decrement pCur->iPage.
+**
+** The page is fetched as read-write unless pCur is not NULL and is
+** a read-only cursor.
+**
+** If an error occurs, then *ppPage is undefined. It
+** may remain unchanged, or it may be set to an invalid value.
+*/
+static int getAndInitPage(
+ BtShared *pBt, /* The database file */
+ Pgno pgno, /* Number of the page to get */
+ MemPage **ppPage, /* Write the page pointer here */
+ BtCursor *pCur, /* Cursor to receive the page, or NULL */
+ int bReadOnly /* True for a read-only page */
+){
+ int rc;
+ DbPage *pDbPage;
+ assert( sqlite3_mutex_held(pBt->mutex) );
+ assert( pCur==0 || ppPage==&pCur->apPage[pCur->iPage] );
+ assert( pCur==0 || bReadOnly==pCur->curPagerFlags );
+ assert( pCur==0 || pCur->iPage>0 );
+
+ if( pgno>btreePagecount(pBt) ){
+ rc = SQLITE_CORRUPT_BKPT;
+ goto getAndInitPage_error;
+ }
+ rc = sqlite3PagerGet(pBt->pPager, pgno, (DbPage**)&pDbPage, bReadOnly);
+ if( rc ){
+ goto getAndInitPage_error;
+ }
+ *ppPage = (MemPage*)sqlite3PagerGetExtra(pDbPage);
+ if( (*ppPage)->isInit==0 ){
+ btreePageFromDbPage(pDbPage, pgno, pBt);
+ rc = btreeInitPage(*ppPage);
+ if( rc!=SQLITE_OK ){
+ releasePage(*ppPage);
+ goto getAndInitPage_error;
+ }
+ }
+ assert( (*ppPage)->pgno==pgno );
+ assert( (*ppPage)->aData==sqlite3PagerGetData(pDbPage) );
+
+ /* If obtaining a child page for a cursor, we must verify that the page is
+ ** compatible with the root page. */
+ if( pCur && ((*ppPage)->nCell<1 || (*ppPage)->intKey!=pCur->curIntKey) ){
+ rc = SQLITE_CORRUPT_BKPT;
+ releasePage(*ppPage);
+ goto getAndInitPage_error;
+ }
+ return SQLITE_OK;
+
+getAndInitPage_error:
+ if( pCur ) pCur->iPage--;
+ testcase( pgno==0 );
+ assert( pgno!=0 || rc==SQLITE_CORRUPT );
+ return rc;
+}
+
+/*
+** Release a MemPage. This should be called once for each prior
+** call to btreeGetPage.
+*/
+static void releasePageNotNull(MemPage *pPage){
+ assert( pPage->aData );
+ assert( pPage->pBt );
+ assert( pPage->pDbPage!=0 );
+ assert( sqlite3PagerGetExtra(pPage->pDbPage) == (void*)pPage );
+ assert( sqlite3PagerGetData(pPage->pDbPage)==pPage->aData );
+ assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+ sqlite3PagerUnrefNotNull(pPage->pDbPage);
+}
+static void releasePage(MemPage *pPage){
+ if( pPage ) releasePageNotNull(pPage);
+}
+
+/*
+** Get an unused page.
+**
+** This works just like btreeGetPage() with the following additions:
+**
+** * If the page is already in use for some other purpose, immediately
+** release it and return an SQLITE_CORRUPT error.
+** * Make sure the isInit flag is clear.
+*/
+static int btreeGetUnusedPage(
+ BtShared *pBt, /* The btree */
+ Pgno pgno, /* Number of the page to fetch */
+ MemPage **ppPage, /* Return the page in this parameter */
+ int flags /* PAGER_GET_NOCONTENT or PAGER_GET_READONLY */
+){
+ int rc = btreeGetPage(pBt, pgno, ppPage, flags);
+ if( rc==SQLITE_OK ){
+ if( sqlite3PagerPageRefcount((*ppPage)->pDbPage)>1 ){
+ releasePage(*ppPage);
+ *ppPage = 0;
+ return SQLITE_CORRUPT_BKPT;
+ }
+ (*ppPage)->isInit = 0;
+ }else{
+ *ppPage = 0;
+ }
+ return rc;
+}
+
+
+/*
+** During a rollback, when the pager reloads information into the cache
+** so that the cache is restored to its original state at the start of
+** the transaction, this routine is called for each page restored.
+**
+** This routine needs to reset the extra data section at the end of the
+** page to agree with the restored data.
+*/
+static void pageReinit(DbPage *pData){
+ MemPage *pPage;
+ pPage = (MemPage *)sqlite3PagerGetExtra(pData);
+ assert( sqlite3PagerPageRefcount(pData)>0 );
+ if( pPage->isInit ){
+ assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+ pPage->isInit = 0;
+ if( sqlite3PagerPageRefcount(pData)>1 ){
+ /* pPage might not be a btree page; it might be an overflow page
+ ** or ptrmap page or a free page. In those cases, the following
+ ** call to btreeInitPage() will likely return SQLITE_CORRUPT.
+ ** But no harm is done by this. And it is very important that
+ ** btreeInitPage() be called on every btree page so we make
+ ** the call for every page that comes in for re-initing. */
+ btreeInitPage(pPage);
+ }
+ }
+}
+
+/*
+** Invoke the busy handler for a btree.
+*/
+static int btreeInvokeBusyHandler(void *pArg){
+ BtShared *pBt = (BtShared*)pArg;
+ assert( pBt->db );
+ assert( sqlite3_mutex_held(pBt->db->mutex) );
+ return sqlite3InvokeBusyHandler(&pBt->db->busyHandler);
+}
+
+/*
+** Open a database file.
+**
+** zFilename is the name of the database file. If zFilename is NULL
+** then an ephemeral database is created. The ephemeral database might
+** be exclusively in memory, or it might use a disk-based memory cache.
+** Either way, the ephemeral database will be automatically deleted
+** when sqlite3BtreeClose() is called.
+**
+** If zFilename is ":memory:" then an in-memory database is created
+** that is automatically destroyed when it is closed.
+**
+** The "flags" parameter is a bitmask that might contain bits like
+** BTREE_OMIT_JOURNAL and/or BTREE_MEMORY.
+**
+** If the database is already opened in the same database connection
+** and we are in shared cache mode, then the open will fail with an
+** SQLITE_CONSTRAINT error. We cannot allow two or more BtShared
+** objects in the same database connection since doing so will lead
+** to problems with locking.
+*/
+SQLITE_PRIVATE int sqlite3BtreeOpen(
+ sqlite3_vfs *pVfs, /* VFS to use for this b-tree */
+ const char *zFilename, /* Name of the file containing the BTree database */
+ sqlite3 *db, /* Associated database handle */
+ Btree **ppBtree, /* Pointer to new Btree object written here */
+ int flags, /* Options */
+ int vfsFlags /* Flags passed through to sqlite3_vfs.xOpen() */
+){
+ BtShared *pBt = 0; /* Shared part of btree structure */
+ Btree *p; /* Handle to return */
+ sqlite3_mutex *mutexOpen = 0; /* Prevents a race condition. Ticket #3537 */
+ int rc = SQLITE_OK; /* Result code from this function */
+ u8 nReserve; /* Bytes of unused space on each page */
+ unsigned char zDbHeader[100]; /* Database header content */
+
+ /* True if opening an ephemeral, temporary database */
+ const int isTempDb = zFilename==0 || zFilename[0]==0;
+
+ /* Set the variable isMemdb to true for an in-memory database, or
+ ** false for a file-based database.
+ */
+#ifdef SQLITE_OMIT_MEMORYDB
+ const int isMemdb = 0;
+#else
+ const int isMemdb = (zFilename && strcmp(zFilename, ":memory:")==0)
+ || (isTempDb && sqlite3TempInMemory(db))
+ || (vfsFlags & SQLITE_OPEN_MEMORY)!=0;
+#endif
+
+ assert( db!=0 );
+ assert( pVfs!=0 );
+ assert( sqlite3_mutex_held(db->mutex) );
+ assert( (flags&0xff)==flags ); /* flags fit in 8 bits */
+
+ /* Only a BTREE_SINGLE database can be BTREE_UNORDERED */
+ assert( (flags & BTREE_UNORDERED)==0 || (flags & BTREE_SINGLE)!=0 );
+
+ /* A BTREE_SINGLE database is always temporary and/or ephemeral */
+ assert( (flags & BTREE_SINGLE)==0 || isTempDb );
+
+ if( isMemdb ){
+ flags |= BTREE_MEMORY;
+ }
+ if( (vfsFlags & SQLITE_OPEN_MAIN_DB)!=0 && (isMemdb || isTempDb) ){
+ vfsFlags = (vfsFlags & ~SQLITE_OPEN_MAIN_DB) | SQLITE_OPEN_TEMP_DB;
+ }
+ p = sqlite3MallocZero(sizeof(Btree));
+ if( !p ){
+ return SQLITE_NOMEM_BKPT;
+ }
+ p->inTrans = TRANS_NONE;
+ p->db = db;
+#ifndef SQLITE_OMIT_SHARED_CACHE
+ p->lock.pBtree = p;
+ p->lock.iTable = 1;
+#endif
+
+#if !defined(SQLITE_OMIT_SHARED_CACHE) && !defined(SQLITE_OMIT_DISKIO)
+ /*
+ ** If this Btree is a candidate for shared cache, try to find an
+ ** existing BtShared object that we can share with
+ */
+ if( isTempDb==0 && (isMemdb==0 || (vfsFlags&SQLITE_OPEN_URI)!=0) ){
+ if( vfsFlags & SQLITE_OPEN_SHAREDCACHE ){
+ int nFilename = sqlite3Strlen30(zFilename)+1;
+ int nFullPathname = pVfs->mxPathname+1;
+ char *zFullPathname = sqlite3Malloc(MAX(nFullPathname,nFilename));
+ MUTEX_LOGIC( sqlite3_mutex *mutexShared; )
+
+ p->sharable = 1;
+ if( !zFullPathname ){
+ sqlite3_free(p);
+ return SQLITE_NOMEM_BKPT;
+ }
+ if( isMemdb ){
+ memcpy(zFullPathname, zFilename, nFilename);
+ }else{
+ rc = sqlite3OsFullPathname(pVfs, zFilename,
+ nFullPathname, zFullPathname);
+ if( rc ){
+ sqlite3_free(zFullPathname);
+ sqlite3_free(p);
+ return rc;
+ }
+ }
+#if SQLITE_THREADSAFE
+ mutexOpen = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_OPEN);
+ sqlite3_mutex_enter(mutexOpen);
+ mutexShared = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER);
+ sqlite3_mutex_enter(mutexShared);
+#endif
+ for(pBt=GLOBAL(BtShared*,sqlite3SharedCacheList); pBt; pBt=pBt->pNext){
+ assert( pBt->nRef>0 );
+ if( 0==strcmp(zFullPathname, sqlite3PagerFilename(pBt->pPager, 0))
+ && sqlite3PagerVfs(pBt->pPager)==pVfs ){
+ int iDb;
+ for(iDb=db->nDb-1; iDb>=0; iDb--){
+ Btree *pExisting = db->aDb[iDb].pBt;
+ if( pExisting && pExisting->pBt==pBt ){
+ sqlite3_mutex_leave(mutexShared);
+ sqlite3_mutex_leave(mutexOpen);
+ sqlite3_free(zFullPathname);
+ sqlite3_free(p);
+ return SQLITE_CONSTRAINT;
+ }
+ }
+ p->pBt = pBt;
+ pBt->nRef++;
+ break;
+ }
+ }
+ sqlite3_mutex_leave(mutexShared);
+ sqlite3_free(zFullPathname);
+ }
+#ifdef SQLITE_DEBUG
+ else{
+ /* In debug mode, we mark all persistent databases as sharable
+ ** even when they are not. This exercises the locking code and
+ ** gives more opportunity for asserts(sqlite3_mutex_held())
+ ** statements to find locking problems.
+ */
+ p->sharable = 1;
+ }
+#endif
+ }
+#endif
+ if( pBt==0 ){
+ /*
+ ** The following asserts make sure that structures used by the btree are
+ ** the right size. This is to guard against size changes that result
+ ** when compiling on a different architecture.
+ */
+ assert( sizeof(i64)==8 );
+ assert( sizeof(u64)==8 );
+ assert( sizeof(u32)==4 );
+ assert( sizeof(u16)==2 );
+ assert( sizeof(Pgno)==4 );
+
+ pBt = sqlite3MallocZero( sizeof(*pBt) );
+ if( pBt==0 ){
+ rc = SQLITE_NOMEM_BKPT;
+ goto btree_open_out;
+ }
+ rc = sqlite3PagerOpen(pVfs, &pBt->pPager, zFilename,
+ sizeof(MemPage), flags, vfsFlags, pageReinit);
+ if( rc==SQLITE_OK ){
+ sqlite3PagerSetMmapLimit(pBt->pPager, db->szMmap);
+ rc = sqlite3PagerReadFileheader(pBt->pPager,sizeof(zDbHeader),zDbHeader);
+ }
+ if( rc!=SQLITE_OK ){
+ goto btree_open_out;
+ }
+ pBt->openFlags = (u8)flags;
+ pBt->db = db;
+ sqlite3PagerSetBusyhandler(pBt->pPager, btreeInvokeBusyHandler, pBt);
+ p->pBt = pBt;
+
+ pBt->pCursor = 0;
+ pBt->pPage1 = 0;
+ if( sqlite3PagerIsreadonly(pBt->pPager) ) pBt->btsFlags |= BTS_READ_ONLY;
+#ifdef SQLITE_SECURE_DELETE
+ pBt->btsFlags |= BTS_SECURE_DELETE;
+#endif
+ /* EVIDENCE-OF: R-51873-39618 The page size for a database file is
+ ** determined by the 2-byte integer located at an offset of 16 bytes from
+ ** the beginning of the database file. */
+ pBt->pageSize = (zDbHeader[16]<<8) | (zDbHeader[17]<<16);
+ if( pBt->pageSize<512 || pBt->pageSize>SQLITE_MAX_PAGE_SIZE
+ || ((pBt->pageSize-1)&pBt->pageSize)!=0 ){
+ pBt->pageSize = 0;
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ /* If the magic name ":memory:" will create an in-memory database, then
+ ** leave the autoVacuum mode at 0 (do not auto-vacuum), even if
+ ** SQLITE_DEFAULT_AUTOVACUUM is true. On the other hand, if
+ ** SQLITE_OMIT_MEMORYDB has been defined, then ":memory:" is just a
+ ** regular file-name. In this case the auto-vacuum applies as per normal.
+ */
+ if( zFilename && !isMemdb ){
+ pBt->autoVacuum = (SQLITE_DEFAULT_AUTOVACUUM ? 1 : 0);
+ pBt->incrVacuum = (SQLITE_DEFAULT_AUTOVACUUM==2 ? 1 : 0);
+ }
+#endif
+ nReserve = 0;
+ }else{
+ /* EVIDENCE-OF: R-37497-42412 The size of the reserved region is
+ ** determined by the one-byte unsigned integer found at an offset of 20
+ ** into the database file header. */
+ nReserve = zDbHeader[20];
+ pBt->btsFlags |= BTS_PAGESIZE_FIXED;
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ pBt->autoVacuum = (get4byte(&zDbHeader[36 + 4*4])?1:0);
+ pBt->incrVacuum = (get4byte(&zDbHeader[36 + 7*4])?1:0);
+#endif
+ }
+ rc = sqlite3PagerSetPagesize(pBt->pPager, &pBt->pageSize, nReserve);
+ if( rc ) goto btree_open_out;
+ pBt->usableSize = pBt->pageSize - nReserve;
+ assert( (pBt->pageSize & 7)==0 ); /* 8-byte alignment of pageSize */
+
+#if !defined(SQLITE_OMIT_SHARED_CACHE) && !defined(SQLITE_OMIT_DISKIO)
+ /* Add the new BtShared object to the linked list of sharable BtShareds.
+ */
+ pBt->nRef = 1;
+ if( p->sharable ){
+ MUTEX_LOGIC( sqlite3_mutex *mutexShared; )
+ MUTEX_LOGIC( mutexShared = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER);)
+ if( SQLITE_THREADSAFE && sqlite3GlobalConfig.bCoreMutex ){
+ pBt->mutex = sqlite3MutexAlloc(SQLITE_MUTEX_FAST);
+ if( pBt->mutex==0 ){
+ rc = SQLITE_NOMEM_BKPT;
+ goto btree_open_out;
+ }
+ }
+ sqlite3_mutex_enter(mutexShared);
+ pBt->pNext = GLOBAL(BtShared*,sqlite3SharedCacheList);
+ GLOBAL(BtShared*,sqlite3SharedCacheList) = pBt;
+ sqlite3_mutex_leave(mutexShared);
+ }
+#endif
+ }
+
+#if !defined(SQLITE_OMIT_SHARED_CACHE) && !defined(SQLITE_OMIT_DISKIO)
+ /* If the new Btree uses a sharable pBtShared, then link the new
+ ** Btree into the list of all sharable Btrees for the same connection.
+ ** The list is kept in ascending order by pBt address.
+ */
+ if( p->sharable ){
+ int i;
+ Btree *pSib;
+ for(i=0; i<db->nDb; i++){
+ if( (pSib = db->aDb[i].pBt)!=0 && pSib->sharable ){
+ while( pSib->pPrev ){ pSib = pSib->pPrev; }
+ if( (uptr)p->pBt<(uptr)pSib->pBt ){
+ p->pNext = pSib;
+ p->pPrev = 0;
+ pSib->pPrev = p;
+ }else{
+ while( pSib->pNext && (uptr)pSib->pNext->pBt<(uptr)p->pBt ){
+ pSib = pSib->pNext;
+ }
+ p->pNext = pSib->pNext;
+ p->pPrev = pSib;
+ if( p->pNext ){
+ p->pNext->pPrev = p;
+ }
+ pSib->pNext = p;
+ }
+ break;
+ }
+ }
+ }
+#endif
+ *ppBtree = p;
+
+btree_open_out:
+ if( rc!=SQLITE_OK ){
+ if( pBt && pBt->pPager ){
+ sqlite3PagerClose(pBt->pPager, 0);
+ }
+ sqlite3_free(pBt);
+ sqlite3_free(p);
+ *ppBtree = 0;
+ }else{
+ sqlite3_file *pFile;
+
+ /* If the B-Tree was successfully opened, set the pager-cache size to the
+ ** default value. Except, when opening on an existing shared pager-cache,
+ ** do not change the pager-cache size.
+ */
+ if( sqlite3BtreeSchema(p, 0, 0)==0 ){
+ sqlite3PagerSetCachesize(p->pBt->pPager, SQLITE_DEFAULT_CACHE_SIZE);
+ }
+
+ pFile = sqlite3PagerFile(pBt->pPager);
+ if( pFile->pMethods ){
+ sqlite3OsFileControlHint(pFile, SQLITE_FCNTL_PDB, (void*)&pBt->db);
+ }
+ }
+ if( mutexOpen ){
+ assert( sqlite3_mutex_held(mutexOpen) );
+ sqlite3_mutex_leave(mutexOpen);
+ }
+ assert( rc!=SQLITE_OK || sqlite3BtreeConnectionCount(*ppBtree)>0 );
+ return rc;
+}
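+
+/* From the application side, the zFilename conventions accepted by
+** sqlite3BtreeOpen() are normally reached through the public open API.
+** A minimal sketch, compiled out; "example.db" is an arbitrary name and
+** error handling is reduced to returning the first failure.
+*/
+#if 0
+static int openExamples(void){
+  sqlite3 *pOnDisk = 0;
+  sqlite3 *pInMem = 0;
+  int rc;
+
+  /* A regular, file-backed database. */
+  rc = sqlite3_open_v2("example.db", &pOnDisk,
+                       SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE, 0);
+  if( rc==SQLITE_OK ){
+    /* The magic name ":memory:" yields a private in-memory database that
+    ** is destroyed when the connection is closed. */
+    rc = sqlite3_open_v2(":memory:", &pInMem,
+                         SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE, 0);
+  }
+  sqlite3_close(pInMem);
+  sqlite3_close(pOnDisk);
+  return rc;
+}
+#endif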
+
+/*
+** Decrement the BtShared.nRef counter. When it reaches zero,
+** remove the BtShared structure from the sharing list. Return
+** true if the BtShared.nRef counter reaches zero and return
+** false if it is still positive.
+*/
+static int removeFromSharingList(BtShared *pBt){
+#ifndef SQLITE_OMIT_SHARED_CACHE
+ MUTEX_LOGIC( sqlite3_mutex *pMaster; )
+ BtShared *pList;
+ int removed = 0;
+
+ assert( sqlite3_mutex_notheld(pBt->mutex) );
+ MUTEX_LOGIC( pMaster = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); )
+ sqlite3_mutex_enter(pMaster);
+ pBt->nRef--;
+ if( pBt->nRef<=0 ){
+ if( GLOBAL(BtShared*,sqlite3SharedCacheList)==pBt ){
+ GLOBAL(BtShared*,sqlite3SharedCacheList) = pBt->pNext;
+ }else{
+ pList = GLOBAL(BtShared*,sqlite3SharedCacheList);
+ while( ALWAYS(pList) && pList->pNext!=pBt ){
+ pList=pList->pNext;
+ }
+ if( ALWAYS(pList) ){
+ pList->pNext = pBt->pNext;
+ }
+ }
+ if( SQLITE_THREADSAFE ){
+ sqlite3_mutex_free(pBt->mutex);
+ }
+ removed = 1;
+ }
+ sqlite3_mutex_leave(pMaster);
+ return removed;
+#else
+ return 1;
+#endif
+}
+
+/*
+** Make sure pBt->pTmpSpace points to an allocation of
+** MX_CELL_SIZE(pBt) bytes with a 4-byte prefix for a left-child
+** pointer.
+*/
+static void allocateTempSpace(BtShared *pBt){
+ if( !pBt->pTmpSpace ){
+ pBt->pTmpSpace = sqlite3PageMalloc( pBt->pageSize );
+
+ /* One of the uses of pBt->pTmpSpace is to format cells before
+ ** inserting them into a leaf page (function fillInCell()). If
+ ** a cell is less than 4 bytes in size, it is rounded up to 4 bytes
+ ** by the various routines that manipulate binary cells. Which
+ ** can mean that fillInCell() only initializes the first 2 or 3
+ ** bytes of pTmpSpace, but that the first 4 bytes are copied from
+ ** it into a database page. This is not actually a problem, but it
+ ** does cause a valgrind error when the 1 or 2 bytes of uninitialized
+ ** data is passed to system call write(). So to avoid this error,
+ ** zero the first 4 bytes of temp space here.
+ **
+ ** Also: Provide four bytes of initialized space before the
+ ** beginning of pTmpSpace as an area available to prepend the
+ ** left-child pointer to the beginning of a cell.
+ */
+ if( pBt->pTmpSpace ){
+ memset(pBt->pTmpSpace, 0, 8);
+ pBt->pTmpSpace += 4;
+ }
+ }
+}
+
+/*
+** Free the pBt->pTmpSpace allocation
+*/
+static void freeTempSpace(BtShared *pBt){
+ if( pBt->pTmpSpace ){
+ pBt->pTmpSpace -= 4;
+ sqlite3PageFree(pBt->pTmpSpace);
+ pBt->pTmpSpace = 0;
+ }
+}
+
+/*
+** Close an open database and invalidate all cursors.
+*/
+SQLITE_PRIVATE int sqlite3BtreeClose(Btree *p){
+ BtShared *pBt = p->pBt;
+ BtCursor *pCur;
+
+ /* Close all cursors opened via this handle. */
+ assert( sqlite3_mutex_held(p->db->mutex) );
+ sqlite3BtreeEnter(p);
+ pCur = pBt->pCursor;
+ while( pCur ){
+ BtCursor *pTmp = pCur;
+ pCur = pCur->pNext;
+ if( pTmp->pBtree==p ){
+ sqlite3BtreeCloseCursor(pTmp);
+ }
+ }
+
+ /* Rollback any active transaction and free the handle structure.
+ ** The call to sqlite3BtreeRollback() drops any table-locks held by
+ ** this handle.
+ */
+ sqlite3BtreeRollback(p, SQLITE_OK, 0);
+ sqlite3BtreeLeave(p);
+
+ /* If there are still other outstanding references to the shared-btree
+ ** structure, return now. The remainder of this procedure cleans
+ ** up the shared-btree.
+ */
+ assert( p->wantToLock==0 && p->locked==0 );
+ if( !p->sharable || removeFromSharingList(pBt) ){
+ /* The pBt is no longer on the sharing list, so we can access
+ ** it without having to hold the mutex.
+ **
+ ** Clean out and delete the BtShared object.
+ */
+ assert( !pBt->pCursor );
+ sqlite3PagerClose(pBt->pPager, p->db);
+ if( pBt->xFreeSchema && pBt->pSchema ){
+ pBt->xFreeSchema(pBt->pSchema);
+ }
+ sqlite3DbFree(0, pBt->pSchema);
+ freeTempSpace(pBt);
+ sqlite3_free(pBt);
+ }
+
+#ifndef SQLITE_OMIT_SHARED_CACHE
+ assert( p->wantToLock==0 );
+ assert( p->locked==0 );
+ if( p->pPrev ) p->pPrev->pNext = p->pNext;
+ if( p->pNext ) p->pNext->pPrev = p->pPrev;
+#endif
+
+ sqlite3_free(p);
+ return SQLITE_OK;
+}
+
+/*
+** Change the "soft" limit on the number of pages in the cache.
+** Unused and unmodified pages will be recycled when the number of
+** pages in the cache exceeds this soft limit. But the size of the
+** cache is allowed to grow larger than this limit if it contains
+** dirty pages or pages still in active use.
+*/
+SQLITE_PRIVATE int sqlite3BtreeSetCacheSize(Btree *p, int mxPage){
+ BtShared *pBt = p->pBt;
+ assert( sqlite3_mutex_held(p->db->mutex) );
+ sqlite3BtreeEnter(p);
+ sqlite3PagerSetCachesize(pBt->pPager, mxPage);
+ sqlite3BtreeLeave(p);
+ return SQLITE_OK;
+}
+
+/*
+** Change the "spill" limit on the number of pages in the cache.
+** If the number of pages exceeds this limit during a write transaction,
+** the pager might attempt to "spill" pages to the journal early in
+** order to free up memory.
+**
+** The value returned is the current spill size. If zero is passed
+** as an argument, no changes are made to the spill size setting, so
+** using mxPage of 0 is a way to query the current spill size.
+*/
+SQLITE_PRIVATE int sqlite3BtreeSetSpillSize(Btree *p, int mxPage){
+ BtShared *pBt = p->pBt;
+ int res;
+ assert( sqlite3_mutex_held(p->db->mutex) );
+ sqlite3BtreeEnter(p);
+ res = sqlite3PagerSetSpillsize(pBt->pPager, mxPage);
+ sqlite3BtreeLeave(p);
+ return res;
+}
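+
+/* At the SQL level the two limits above are normally adjusted through the
+** cache_size and cache_spill pragmas. A compiled-out sketch; the values
+** 2000 and 500 are arbitrary and "db" is assumed to be an open connection.
+*/
+#if 0
+static int tuneCache(sqlite3 *db){
+  int rc = sqlite3_exec(db, "PRAGMA cache_size=2000;", 0, 0, 0);
+  if( rc==SQLITE_OK ){
+    rc = sqlite3_exec(db, "PRAGMA cache_spill=500;", 0, 0, 0);
+  }
+  return rc;
+}
+#endif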
+
+#if SQLITE_MAX_MMAP_SIZE>0
+/*
+** Change the limit on the amount of the database file that may be
+** memory mapped.
+*/
+SQLITE_PRIVATE int sqlite3BtreeSetMmapLimit(Btree *p, sqlite3_int64 szMmap){
+ BtShared *pBt = p->pBt;
+ assert( sqlite3_mutex_held(p->db->mutex) );
+ sqlite3BtreeEnter(p);
+ sqlite3PagerSetMmapLimit(pBt->pPager, szMmap);
+ sqlite3BtreeLeave(p);
+ return SQLITE_OK;
+}
+#endif /* SQLITE_MAX_MMAP_SIZE>0 */
+
+/*
+** Change the way data is synced to disk in order to increase or decrease
+** how well the database resists damage due to OS crashes and power
+** failures. Level 1 is the same as asynchronous (no syncs() occur and
+** there is a high probability of damage). Level 2 is the default. There
+** is a very low but non-zero probability of damage. Level 3 reduces the
+** probability of damage to near zero but with a write performance reduction.
+*/
+#ifndef SQLITE_OMIT_PAGER_PRAGMAS
+SQLITE_PRIVATE int sqlite3BtreeSetPagerFlags(
+ Btree *p, /* The btree to set the safety level on */
+ unsigned pgFlags /* Various PAGER_* flags */
+){
+ BtShared *pBt = p->pBt;
+ assert( sqlite3_mutex_held(p->db->mutex) );
+ sqlite3BtreeEnter(p);
+ sqlite3PagerSetFlags(pBt->pPager, pgFlags);
+ sqlite3BtreeLeave(p);
+ return SQLITE_OK;
+}
+#endif
+
+/*
+** Change the default page size and the number of reserved bytes per page.
+** Or, if the page size has already been fixed, return SQLITE_READONLY
+** without changing anything.
+**
+** The page size must be a power of 2 between 512 and 65536. If the page
+** size supplied does not meet this constraint then the page size is not
+** changed.
+**
+** Page sizes are constrained to be a power of two so that the region
+** of the database file used for locking (beginning at PENDING_BYTE,
+** the first byte past the 1GB boundary, 0x40000000) needs to occur
+** at the beginning of a page.
+**
+** If parameter nReserve is less than zero, then the number of reserved
+** bytes per page is left unchanged.
+**
+** If iFix!=0 then the BTS_PAGESIZE_FIXED flag is set so that the page size
+** and autovacuum mode can no longer be changed.
+*/
+SQLITE_PRIVATE int sqlite3BtreeSetPageSize(Btree *p, int pageSize, int nReserve, int iFix){
+ int rc = SQLITE_OK;
+ BtShared *pBt = p->pBt;
+ assert( nReserve>=-1 && nReserve<=255 );
+ sqlite3BtreeEnter(p);
+#if SQLITE_HAS_CODEC
+ if( nReserve>pBt->optimalReserve ) pBt->optimalReserve = (u8)nReserve;
+#endif
+ if( pBt->btsFlags & BTS_PAGESIZE_FIXED ){
+ sqlite3BtreeLeave(p);
+ return SQLITE_READONLY;
+ }
+ if( nReserve<0 ){
+ nReserve = pBt->pageSize - pBt->usableSize;
+ }
+ assert( nReserve>=0 && nReserve<=255 );
+ if( pageSize>=512 && pageSize<=SQLITE_MAX_PAGE_SIZE &&
+ ((pageSize-1)&pageSize)==0 ){
+ assert( (pageSize & 7)==0 );
+ assert( !pBt->pCursor );
+ pBt->pageSize = (u32)pageSize;
+ freeTempSpace(pBt);
+ }
+ rc = sqlite3PagerSetPagesize(pBt->pPager, &pBt->pageSize, nReserve);
+ pBt->usableSize = pBt->pageSize - (u16)nReserve;
+ if( iFix ) pBt->btsFlags |= BTS_PAGESIZE_FIXED;
+ sqlite3BtreeLeave(p);
+ return rc;
+}
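+
+/* The page-size rule enforced above, in isolation: a power of two between
+** 512 and SQLITE_MAX_PAGE_SIZE (never more than 65536). isValidPageSize()
+** is a hypothetical, compiled-out helper that restates the same test.
+*/
+#if 0
+static int isValidPageSize(int pageSize){
+  return pageSize>=512
+      && pageSize<=65536
+      && ((pageSize-1)&pageSize)==0;  /* Power-of-two test */
+}
+#endif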
+
+/*
+** Return the currently defined page size
+*/
+SQLITE_PRIVATE int sqlite3BtreeGetPageSize(Btree *p){
+ return p->pBt->pageSize;
+}
+
+/*
+** This function is similar to sqlite3BtreeGetReserve(), except that it
+** may only be called if it is guaranteed that the b-tree mutex is already
+** held.
+**
+** This is useful in one special case in the backup API code where it is
+** known that the shared b-tree mutex is held, but the mutex on the
+** database handle that owns *p is not. In this case if sqlite3BtreeEnter()
+** were to be called, it might collide with some other operation on the
+** database handle that owns *p, causing undefined behavior.
+*/
+SQLITE_PRIVATE int sqlite3BtreeGetReserveNoMutex(Btree *p){
+ int n;
+ assert( sqlite3_mutex_held(p->pBt->mutex) );
+ n = p->pBt->pageSize - p->pBt->usableSize;
+ return n;
+}
+
+/*
+** Return the number of bytes of space at the end of every page that
+** are intentionally left unused. This is the "reserved" space that is
+** sometimes used by extensions.
+**
+** If SQLITE_HAS_CODEC is defined then the number returned is the
+** greater of the current reserved space and the maximum requested
+** reserve space.
+*/
+SQLITE_PRIVATE int sqlite3BtreeGetOptimalReserve(Btree *p){
+ int n;
+ sqlite3BtreeEnter(p);
+ n = sqlite3BtreeGetReserveNoMutex(p);
+#ifdef SQLITE_HAS_CODEC
+ if( n<p->pBt->optimalReserve ) n = p->pBt->optimalReserve;
+#endif
+ sqlite3BtreeLeave(p);
+ return n;
+}
+
+
+/*
+** Set the maximum page count for a database if mxPage is positive.
+** No changes are made if mxPage is 0 or negative.
+** Regardless of the value of mxPage, return the maximum page count.
+*/
+SQLITE_PRIVATE int sqlite3BtreeMaxPageCount(Btree *p, int mxPage){
+ int n;
+ sqlite3BtreeEnter(p);
+ n = sqlite3PagerMaxPageCount(p->pBt->pPager, mxPage);
+ sqlite3BtreeLeave(p);
+ return n;
+}
+
+/*
+** Set or clear the BTS_SECURE_DELETE flag: clear it if newFlag is 0 and
+** set it if newFlag is 1. If newFlag is -1, then make no changes.
+** Always return the value of the BTS_SECURE_DELETE setting after the change.
+*/
+SQLITE_PRIVATE int sqlite3BtreeSecureDelete(Btree *p, int newFlag){
+ int b;
+ if( p==0 ) return 0;
+ sqlite3BtreeEnter(p);
+ if( newFlag>=0 ){
+ p->pBt->btsFlags &= ~BTS_SECURE_DELETE;
+ if( newFlag ) p->pBt->btsFlags |= BTS_SECURE_DELETE;
+ }
+ b = (p->pBt->btsFlags & BTS_SECURE_DELETE)!=0;
+ sqlite3BtreeLeave(p);
+ return b;
+}
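+
+/* The newFlag convention above in one line: -1 queries the current setting
+** without changing it, while 0 and 1 clear or set BTS_SECURE_DELETE. A
+** compiled-out sketch using a hypothetical wrapper name.
+*/
+#if 0
+static int isSecureDeleteOn(Btree *p){
+  return sqlite3BtreeSecureDelete(p, -1);  /* Query only; no change made */
+}
+#endif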
+
+/*
+** Change the 'auto-vacuum' property of the database. If the 'autoVacuum'
+** parameter is non-zero, then auto-vacuum mode is enabled. If zero, it
+** is disabled. The default value for the auto-vacuum property is
+** determined by the SQLITE_DEFAULT_AUTOVACUUM macro.
+*/
+SQLITE_PRIVATE int sqlite3BtreeSetAutoVacuum(Btree *p, int autoVacuum){
+#ifdef SQLITE_OMIT_AUTOVACUUM
+ return SQLITE_READONLY;
+#else
+ BtShared *pBt = p->pBt;
+ int rc = SQLITE_OK;
+ u8 av = (u8)autoVacuum;
+
+ sqlite3BtreeEnter(p);
+ if( (pBt->btsFlags & BTS_PAGESIZE_FIXED)!=0 && (av ?1:0)!=pBt->autoVacuum ){
+ rc = SQLITE_READONLY;
+ }else{
+ pBt->autoVacuum = av ?1:0;
+ pBt->incrVacuum = av==2 ?1:0;
+ }
+ sqlite3BtreeLeave(p);
+ return rc;
+#endif
+}
+
+/*
+** Return the value of the 'auto-vacuum' property. If auto-vacuum is
+** enabled 1 is returned. Otherwise 0.
+*/
+SQLITE_PRIVATE int sqlite3BtreeGetAutoVacuum(Btree *p){
+#ifdef SQLITE_OMIT_AUTOVACUUM
+ return BTREE_AUTOVACUUM_NONE;
+#else
+ int rc;
+ sqlite3BtreeEnter(p);
+ rc = (
+ (!p->pBt->autoVacuum)?BTREE_AUTOVACUUM_NONE:
+ (!p->pBt->incrVacuum)?BTREE_AUTOVACUUM_FULL:
+ BTREE_AUTOVACUUM_INCR
+ );
+ sqlite3BtreeLeave(p);
+ return rc;
+#endif
+}
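+
+/* How the two BtShared flags combine into the three auto-vacuum modes
+** returned above, assuming the usual 0/1/2 values for BTREE_AUTOVACUUM_NONE,
+** BTREE_AUTOVACUUM_FULL and BTREE_AUTOVACUUM_INCR. autoVacuumMode() is a
+** hypothetical, compiled-out restatement of the conditional.
+*/
+#if 0
+static int autoVacuumMode(int autoVacuum, int incrVacuum){
+  if( !autoVacuum ) return 0;  /* BTREE_AUTOVACUUM_NONE */
+  if( !incrVacuum ) return 1;  /* BTREE_AUTOVACUUM_FULL */
+  return 2;                    /* BTREE_AUTOVACUUM_INCR */
+}
+#endif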
+
+
+/*
+** Get a reference to pPage1 of the database file. This will
+** also acquire a readlock on that file.
+**
+** SQLITE_OK is returned on success. If the file is not a
+** well-formed database file, then SQLITE_CORRUPT is returned.
+** SQLITE_BUSY is returned if the database is locked. SQLITE_NOMEM
+** is returned if we run out of memory.
+*/
+static int lockBtree(BtShared *pBt){
+ int rc; /* Result code from subfunctions */
+ MemPage *pPage1; /* Page 1 of the database file */
+ int nPage; /* Number of pages in the database */
+ int nPageFile = 0; /* Number of pages in the database file */
+ int nPageHeader; /* Number of pages in the database according to hdr */
+
+ assert( sqlite3_mutex_held(pBt->mutex) );
+ assert( pBt->pPage1==0 );
+ rc = sqlite3PagerSharedLock(pBt->pPager);
+ if( rc!=SQLITE_OK ) return rc;
+ rc = btreeGetPage(pBt, 1, &pPage1, 0);
+ if( rc!=SQLITE_OK ) return rc;
+
+ /* Do some checking to help ensure the file we opened really is
+ ** a valid database file.
+ */
+ nPage = nPageHeader = get4byte(28+(u8*)pPage1->aData);
+ sqlite3PagerPagecount(pBt->pPager, &nPageFile);
+ if( nPage==0 || memcmp(24+(u8*)pPage1->aData, 92+(u8*)pPage1->aData,4)!=0 ){
+ nPage = nPageFile;
+ }
+ if( nPage>0 ){
+ u32 pageSize;
+ u32 usableSize;
+ u8 *page1 = pPage1->aData;
+ rc = SQLITE_NOTADB;
+ /* EVIDENCE-OF: R-43737-39999 Every valid SQLite database file begins
+ ** with the following 16 bytes (in hex): 53 51 4c 69 74 65 20 66 6f 72 6d
+ ** 61 74 20 33 00. */
+ if( memcmp(page1, zMagicHeader, 16)!=0 ){
+ goto page1_init_failed;
+ }
+
+#ifdef SQLITE_OMIT_WAL
+ if( page1[18]>1 ){
+ pBt->btsFlags |= BTS_READ_ONLY;
+ }
+ if( page1[19]>1 ){
+ goto page1_init_failed;
+ }
+#else
+ if( page1[18]>2 ){
+ pBt->btsFlags |= BTS_READ_ONLY;
+ }
+ if( page1[19]>2 ){
+ goto page1_init_failed;
+ }
+
+ /* If the write version is set to 2, this database should be accessed
+ ** in WAL mode. If the log is not already open, open it now. Then
+ ** return SQLITE_OK without populating BtShared.pPage1.
+ ** The caller detects this and calls this function again. This is
+ ** required as the version of page 1 currently in the page1 buffer
+ ** may not be the latest version - there may be a newer one in the log
+ ** file.
+ */
+ if( page1[19]==2 && (pBt->btsFlags & BTS_NO_WAL)==0 ){
+ int isOpen = 0;
+ rc = sqlite3PagerOpenWal(pBt->pPager, &isOpen);
+ if( rc!=SQLITE_OK ){
+ goto page1_init_failed;
+ }else{
+#if SQLITE_DEFAULT_SYNCHRONOUS!=SQLITE_DEFAULT_WAL_SYNCHRONOUS
+ sqlite3 *db;
+ Db *pDb;
+ if( (db=pBt->db)!=0 && (pDb=db->aDb)!=0 ){
+ while( pDb->pBt==0 || pDb->pBt->pBt!=pBt ){ pDb++; }
+ if( pDb->bSyncSet==0
+ && pDb->safety_level==SQLITE_DEFAULT_SYNCHRONOUS+1
+ ){
+ pDb->safety_level = SQLITE_DEFAULT_WAL_SYNCHRONOUS+1;
+ sqlite3PagerSetFlags(pBt->pPager,
+ pDb->safety_level | (db->flags & PAGER_FLAGS_MASK));
+ }
+ }
+#endif
+ if( isOpen==0 ){
+ releasePage(pPage1);
+ return SQLITE_OK;
+ }
+ }
+ rc = SQLITE_NOTADB;
+ }
+#endif
+
+ /* EVIDENCE-OF: R-15465-20813 The maximum and minimum embedded payload
+ ** fractions and the leaf payload fraction values must be 64, 32, and 32.
+ **
+ ** The original design allowed these amounts to vary, but as of
+ ** version 3.6.0, we require them to be fixed.
+ */
+ if( memcmp(&page1[21], "\100\040\040",3)!=0 ){
+ goto page1_init_failed;
+ }
+ /* EVIDENCE-OF: R-51873-39618 The page size for a database file is
+ ** determined by the 2-byte integer located at an offset of 16 bytes from
+ ** the beginning of the database file. */
+ pageSize = (page1[16]<<8) | (page1[17]<<16);
+ /* EVIDENCE-OF: R-25008-21688 The size of a page is a power of two
+ ** between 512 and 65536 inclusive. */
+ if( ((pageSize-1)&pageSize)!=0
+ || pageSize>SQLITE_MAX_PAGE_SIZE
+ || pageSize<=256
+ ){
+ goto page1_init_failed;
+ }
+ assert( (pageSize & 7)==0 );
+ /* EVIDENCE-OF: R-59310-51205 The "reserved space" size in the 1-byte
+ ** integer at offset 20 is the number of bytes of space at the end of
+ ** each page to reserve for extensions.
+ **
+ ** EVIDENCE-OF: R-37497-42412 The size of the reserved region is
+ ** determined by the one-byte unsigned integer found at an offset of 20
+ ** into the database file header. */
+ usableSize = pageSize - page1[20];
+ if( (u32)pageSize!=pBt->pageSize ){
+ /* After reading the first page of the database assuming a page size
+ ** of BtShared.pageSize, we have discovered that the page-size is
+ ** actually pageSize. Unlock the database, leave pBt->pPage1 at
+ ** zero and return SQLITE_OK. The caller will call this function
+ ** again with the correct page-size.
+ */
+ releasePage(pPage1);
+ pBt->usableSize = usableSize;
+ pBt->pageSize = pageSize;
+ freeTempSpace(pBt);
+ rc = sqlite3PagerSetPagesize(pBt->pPager, &pBt->pageSize,
+ pageSize-usableSize);
+ return rc;
+ }
+ if( (pBt->db->flags & SQLITE_RecoveryMode)==0 && nPage>nPageFile ){
+ rc = SQLITE_CORRUPT_BKPT;
+ goto page1_init_failed;
+ }
+ /* EVIDENCE-OF: R-28312-64704 However, the usable size is not allowed to
+ ** be less than 480. In other words, if the page size is 512, then the
+ ** reserved space size cannot exceed 32. */
+ if( usableSize<480 ){
+ goto page1_init_failed;
+ }
+ pBt->pageSize = pageSize;
+ pBt->usableSize = usableSize;
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ pBt->autoVacuum = (get4byte(&page1[36 + 4*4])?1:0);
+ pBt->incrVacuum = (get4byte(&page1[36 + 7*4])?1:0);
+#endif
+ }
+
+ /* maxLocal is the maximum amount of payload to store locally for
+ ** a cell. Make sure it is small enough so that at least minFanout
+ ** cells will fit on one page. We assume a 10-byte page header.
+ ** Besides the payload, the cell must store:
+ ** 2-byte pointer to the cell
+ ** 4-byte child pointer
+ ** 9-byte nKey value
+ ** 4-byte nData value
+ ** 4-byte overflow page pointer
+ ** So a cell consists of a 2-byte pointer, a header which is as much as
+ ** 17 bytes long, 0 to N bytes of payload, and an optional 4 byte overflow
+ ** page pointer.
+ */
+ pBt->maxLocal = (u16)((pBt->usableSize-12)*64/255 - 23);
+ pBt->minLocal = (u16)((pBt->usableSize-12)*32/255 - 23);
+ pBt->maxLeaf = (u16)(pBt->usableSize - 35);
+ pBt->minLeaf = (u16)((pBt->usableSize-12)*32/255 - 23);
+ if( pBt->maxLocal>127 ){
+ pBt->max1bytePayload = 127;
+ }else{
+ pBt->max1bytePayload = (u8)pBt->maxLocal;
+ }
+ assert( pBt->maxLeaf + 23 <= MX_CELL_SIZE(pBt) );
+ pBt->pPage1 = pPage1;
+ pBt->nPage = nPage;
+ return SQLITE_OK;
+
+page1_init_failed:
+ releasePage(pPage1);
+ pBt->pPage1 = 0;
+ return rc;
+}
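+
+/* The file-header checks performed by lockBtree(), condensed into one
+** compiled-out sketch. headerUsableSize() is a hypothetical helper that
+** assumes the caller has already read the first 100 bytes of the file into
+** aHdr[]; it returns the usable page size, or 0 if the header is rejected.
+*/
+#if 0
+static unsigned headerUsableSize(const unsigned char *aHdr){
+  unsigned pageSize, usableSize;
+  if( memcmp(aHdr, "SQLite format 3", 16)!=0 ) return 0;  /* Magic string */
+  pageSize = (aHdr[16]<<8) | (aHdr[17]<<16);     /* Stored value 1 => 65536 */
+  if( pageSize<512 || pageSize>65536 || ((pageSize-1)&pageSize)!=0 ) return 0;
+  usableSize = pageSize - aHdr[20];              /* Byte 20: reserved bytes */
+  if( usableSize<480 ) return 0;                 /* Usable size floor */
+  return usableSize;
+}
+#endif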
+
+#ifndef NDEBUG
+/*
+** Return the number of cursors open on pBt. This is for use
+** in assert() expressions, so it is only compiled if NDEBUG is not
+** defined.
+**
+** Only write cursors are counted if wrOnly is true. If wrOnly is
+** false then all cursors are counted.
+**
+** For the purposes of this routine, a cursor is any cursor that
+** is capable of reading or writing to the database. Cursors that
+** have been tripped into the CURSOR_FAULT state are not counted.
+*/
+static int countValidCursors(BtShared *pBt, int wrOnly){
+ BtCursor *pCur;
+ int r = 0;
+ for(pCur=pBt->pCursor; pCur; pCur=pCur->pNext){
+ if( (wrOnly==0 || (pCur->curFlags & BTCF_WriteFlag)!=0)
+ && pCur->eState!=CURSOR_FAULT ) r++;
+ }
+ return r;
+}
+#endif
+
+/*
+** If there are no outstanding cursors and we are not in the middle
+** of a transaction but there is a read lock on the database, then
+** this routine unrefs the first page of the database file which
+** has the effect of releasing the read lock.
+**
+** If there is a transaction in progress, this routine is a no-op.
+*/
+static void unlockBtreeIfUnused(BtShared *pBt){
+ assert( sqlite3_mutex_held(pBt->mutex) );
+ assert( countValidCursors(pBt,0)==0 || pBt->inTransaction>TRANS_NONE );
+ if( pBt->inTransaction==TRANS_NONE && pBt->pPage1!=0 ){
+ MemPage *pPage1 = pBt->pPage1;
+ assert( pPage1->aData );
+ assert( sqlite3PagerRefcount(pBt->pPager)==1 );
+ pBt->pPage1 = 0;
+ releasePageNotNull(pPage1);
+ }
+}
+
+/*
+** If pBt points to an empty file then convert that empty file
+** into a new empty database by initializing the first page of
+** the database.
+*/
+static int newDatabase(BtShared *pBt){
+ MemPage *pP1;
+ unsigned char *data;
+ int rc;
+
+ assert( sqlite3_mutex_held(pBt->mutex) );
+ if( pBt->nPage>0 ){
+ return SQLITE_OK;
+ }
+ pP1 = pBt->pPage1;
+ assert( pP1!=0 );
+ data = pP1->aData;
+ rc = sqlite3PagerWrite(pP1->pDbPage);
+ if( rc ) return rc;
+ memcpy(data, zMagicHeader, sizeof(zMagicHeader));
+ assert( sizeof(zMagicHeader)==16 );
+ data[16] = (u8)((pBt->pageSize>>8)&0xff);
+ data[17] = (u8)((pBt->pageSize>>16)&0xff);
+ data[18] = 1;
+ data[19] = 1;
+ assert( pBt->usableSize<=pBt->pageSize && pBt->usableSize+255>=pBt->pageSize);
+ data[20] = (u8)(pBt->pageSize - pBt->usableSize);
+ data[21] = 64;
+ data[22] = 32;
+ data[23] = 32;
+ memset(&data[24], 0, 100-24);
+ zeroPage(pP1, PTF_INTKEY|PTF_LEAF|PTF_LEAFDATA );
+ pBt->btsFlags |= BTS_PAGESIZE_FIXED;
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ assert( pBt->autoVacuum==1 || pBt->autoVacuum==0 );
+ assert( pBt->incrVacuum==1 || pBt->incrVacuum==0 );
+ put4byte(&data[36 + 4*4], pBt->autoVacuum);
+ put4byte(&data[36 + 7*4], pBt->incrVacuum);
+#endif
+ pBt->nPage = 1;
+ data[31] = 1;
+ return SQLITE_OK;
+}
+
+/*
+** Initialize the first page of the database file (creating a database
+** consisting of a single page and no schema objects). Return SQLITE_OK
+** if successful, or an SQLite error code otherwise.
+*/
+SQLITE_PRIVATE int sqlite3BtreeNewDb(Btree *p){
+ int rc;
+ sqlite3BtreeEnter(p);
+ p->pBt->nPage = 0;
+ rc = newDatabase(p->pBt);
+ sqlite3BtreeLeave(p);
+ return rc;
+}
+
+/*
+** Attempt to start a new transaction. A write-transaction
+** is started if the second argument is nonzero, otherwise a read-
+** transaction. If the second argument is 2 or more, an exclusive
+** transaction is started, meaning that no other process is allowed
+** to access the database. A preexisting transaction may not be
+** upgraded to exclusive by calling this routine a second time - the
+** exclusivity flag only works for a new transaction.
+**
+** A write-transaction must be started before attempting any
+** changes to the database. None of the following routines
+** will work unless a transaction is started first:
+**
+** sqlite3BtreeCreateTable()
+** sqlite3BtreeCreateIndex()
+** sqlite3BtreeClearTable()
+** sqlite3BtreeDropTable()
+** sqlite3BtreeInsert()
+** sqlite3BtreeDelete()
+** sqlite3BtreeUpdateMeta()
+**
+** If an initial attempt to acquire the lock fails because of lock contention
+** and the database was previously unlocked, then invoke the busy handler
+** if there is one. But if there was previously a read-lock, do not
+** invoke the busy handler - just return SQLITE_BUSY. SQLITE_BUSY is
+** returned when there is already a read-lock in order to avoid a deadlock.
+**
+** Suppose there are two processes A and B. A has a read lock and B has
+** a reserved lock. B tries to promote to exclusive but is blocked because
+** of A's read lock. A tries to promote to reserved but is blocked by B.
+** One or the other of the two processes must give way or there can be
+** no progress. By returning SQLITE_BUSY and not invoking the busy callback
+** when A already has a read lock, we encourage A to give up and let B
+** proceed.
+*/
+SQLITE_PRIVATE int sqlite3BtreeBeginTrans(Btree *p, int wrflag){
+ BtShared *pBt = p->pBt;
+ int rc = SQLITE_OK;
+
+ sqlite3BtreeEnter(p);
+ btreeIntegrity(p);
+
+ /* If the btree is already in a write-transaction, or it
+ ** is already in a read-transaction and a read-transaction
+ ** is requested, this is a no-op.
+ */
+ if( p->inTrans==TRANS_WRITE || (p->inTrans==TRANS_READ && !wrflag) ){
+ goto trans_begun;
+ }
+ assert( pBt->inTransaction==TRANS_WRITE || IfNotOmitAV(pBt->bDoTruncate)==0 );
+
+ /* Write transactions are not possible on a read-only database */
+ if( (pBt->btsFlags & BTS_READ_ONLY)!=0 && wrflag ){
+ rc = SQLITE_READONLY;
+ goto trans_begun;
+ }
+
+#ifndef SQLITE_OMIT_SHARED_CACHE
+ {
+ sqlite3 *pBlock = 0;
+ /* If another database handle has already opened a write transaction
+ ** on this shared-btree structure and a second write transaction is
+ ** requested, return SQLITE_LOCKED.
+ */
+ if( (wrflag && pBt->inTransaction==TRANS_WRITE)
+ || (pBt->btsFlags & BTS_PENDING)!=0
+ ){
+ pBlock = pBt->pWriter->db;
+ }else if( wrflag>1 ){
+ BtLock *pIter;
+ for(pIter=pBt->pLock; pIter; pIter=pIter->pNext){
+ if( pIter->pBtree!=p ){
+ pBlock = pIter->pBtree->db;
+ break;
+ }
+ }
+ }
+ if( pBlock ){
+ sqlite3ConnectionBlocked(p->db, pBlock);
+ rc = SQLITE_LOCKED_SHAREDCACHE;
+ goto trans_begun;
+ }
+ }
+#endif
+
+ /* Any read-only or read-write transaction implies a read-lock on
+ ** page 1. So if some other shared-cache client already has a write-lock
+ ** on page 1, the transaction cannot be opened. */
+ rc = querySharedCacheTableLock(p, MASTER_ROOT, READ_LOCK);
+ if( SQLITE_OK!=rc ) goto trans_begun;
+
+ pBt->btsFlags &= ~BTS_INITIALLY_EMPTY;
+ if( pBt->nPage==0 ) pBt->btsFlags |= BTS_INITIALLY_EMPTY;
+ do {
+ /* Call lockBtree() until either pBt->pPage1 is populated or
+ ** lockBtree() returns something other than SQLITE_OK. lockBtree()
+ ** may return SQLITE_OK but leave pBt->pPage1 set to 0 if after
+ ** reading page 1 it discovers that the page-size of the database
+ ** file is not pBt->pageSize. In this case lockBtree() will update
+ ** pBt->pageSize to the page-size of the file on disk.
+ */
+ while( pBt->pPage1==0 && SQLITE_OK==(rc = lockBtree(pBt)) );
+
+ if( rc==SQLITE_OK && wrflag ){
+ if( (pBt->btsFlags & BTS_READ_ONLY)!=0 ){
+ rc = SQLITE_READONLY;
+ }else{
+ rc = sqlite3PagerBegin(pBt->pPager,wrflag>1,sqlite3TempInMemory(p->db));
+ if( rc==SQLITE_OK ){
+ rc = newDatabase(pBt);
+ }
+ }
+ }
+
+ if( rc!=SQLITE_OK ){
+ unlockBtreeIfUnused(pBt);
+ }
+ }while( (rc&0xFF)==SQLITE_BUSY && pBt->inTransaction==TRANS_NONE &&
+ btreeInvokeBusyHandler(pBt) );
+
+ if( rc==SQLITE_OK ){
+ if( p->inTrans==TRANS_NONE ){
+ pBt->nTransaction++;
+#ifndef SQLITE_OMIT_SHARED_CACHE
+ if( p->sharable ){
+ assert( p->lock.pBtree==p && p->lock.iTable==1 );
+ p->lock.eLock = READ_LOCK;
+ p->lock.pNext = pBt->pLock;
+ pBt->pLock = &p->lock;
+ }
+#endif
+ }
+ p->inTrans = (wrflag?TRANS_WRITE:TRANS_READ);
+ if( p->inTrans>pBt->inTransaction ){
+ pBt->inTransaction = p->inTrans;
+ }
+ if( wrflag ){
+ MemPage *pPage1 = pBt->pPage1;
+#ifndef SQLITE_OMIT_SHARED_CACHE
+ assert( !pBt->pWriter );
+ pBt->pWriter = p;
+ pBt->btsFlags &= ~BTS_EXCLUSIVE;
+ if( wrflag>1 ) pBt->btsFlags |= BTS_EXCLUSIVE;
+#endif
+
+ /* If the db-size header field is incorrect (as it may be if an old
+ ** client has been writing the database file), update it now. Doing
+ ** this sooner rather than later means the database size can safely be
+ ** re-read from page 1 if a savepoint or transaction
+ ** rollback occurs within the transaction.
+ */
+ if( pBt->nPage!=get4byte(&pPage1->aData[28]) ){
+ rc = sqlite3PagerWrite(pPage1->pDbPage);
+ if( rc==SQLITE_OK ){
+ put4byte(&pPage1->aData[28], pBt->nPage);
+ }
+ }
+ }
+ }
+
+
+trans_begun:
+ if( rc==SQLITE_OK && wrflag ){
+ /* This call makes sure that the pager has the correct number of
+ ** open savepoints. If the second parameter is greater than 0 and
+ ** the sub-journal is not already open, then it will be opened here.
+ */
+ rc = sqlite3PagerOpenSavepoint(pBt->pPager, p->db->nSavepoint);
+ }
+
+ btreeIntegrity(p);
+ sqlite3BtreeLeave(p);
+ return rc;
+}
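+
+/* Application-level sketch of the locking policy described above: taking
+** the write lock up front with BEGIN IMMEDIATE avoids the read-to-write
+** upgrade that can deadlock two connections, and a busy timeout stands in
+** for the busy handler. Compiled out; beginWrite() is a hypothetical
+** helper and the 2000 ms timeout is arbitrary.
+*/
+#if 0
+static int beginWrite(sqlite3 *db){
+  int rc;
+  sqlite3_busy_timeout(db, 2000);  /* Retry on contention for up to 2s */
+  rc = sqlite3_exec(db, "BEGIN IMMEDIATE;", 0, 0, 0);
+  /* SQLITE_BUSY here means another connection holds a conflicting lock;
+  ** the caller should back off and retry rather than hold its read lock
+  ** while waiting, which is the deadlock the comment above describes. */
+  return rc;
+}
+#endif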
+
+#ifndef SQLITE_OMIT_AUTOVACUUM
+
+/*
+** Set the pointer-map entries for all children of page pPage. Also, if
+** pPage contains cells that point to overflow pages, set the pointer
+** map entries for the overflow pages as well.
+*/
+static int setChildPtrmaps(MemPage *pPage){
+ int i; /* Counter variable */
+ int nCell; /* Number of cells in page pPage */
+ int rc; /* Return code */
+ BtShared *pBt = pPage->pBt;
+ Pgno pgno = pPage->pgno;
+
+ assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+ rc = btreeInitPage(pPage);
+ if( rc!=SQLITE_OK ) return rc;
+ nCell = pPage->nCell;
+
+ for(i=0; i<nCell; i++){
+ u8 *pCell = findCell(pPage, i);
+
+ ptrmapPutOvflPtr(pPage, pCell, &rc);
+
+ if( !pPage->leaf ){
+ Pgno childPgno = get4byte(pCell);
+ ptrmapPut(pBt, childPgno, PTRMAP_BTREE, pgno, &rc);
+ }
+ }
+
+ if( !pPage->leaf ){
+ Pgno childPgno = get4byte(&pPage->aData[pPage->hdrOffset+8]);
+ ptrmapPut(pBt, childPgno, PTRMAP_BTREE, pgno, &rc);
+ }
+
+ return rc;
+}
+
+/*
+** Somewhere on pPage is a pointer to page iFrom. Modify this pointer so
+** that it points to iTo. Parameter eType describes the type of pointer to
+** be modified, as follows:
+**
+** PTRMAP_BTREE: pPage is a btree-page. The pointer points at a child
+** page of pPage.
+**
+** PTRMAP_OVERFLOW1: pPage is a btree-page. The pointer points at an overflow
+** page pointed to by one of the cells on pPage.
+**
+** PTRMAP_OVERFLOW2: pPage is an overflow-page. The pointer points at the next
+** overflow page in the list.
+*/
+static int modifyPagePointer(MemPage *pPage, Pgno iFrom, Pgno iTo, u8 eType){
+ assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+ assert( sqlite3PagerIswriteable(pPage->pDbPage) );
+ if( eType==PTRMAP_OVERFLOW2 ){
+ /* The pointer is always the first 4 bytes of the page in this case. */
+ if( get4byte(pPage->aData)!=iFrom ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+ put4byte(pPage->aData, iTo);
+ }else{
+ int i;
+ int nCell;
+ int rc;
+
+ rc = btreeInitPage(pPage);
+ if( rc ) return rc;
+ nCell = pPage->nCell;
+
+ for(i=0; i<nCell; i++){
+ u8 *pCell = findCell(pPage, i);
+ if( eType==PTRMAP_OVERFLOW1 ){
+ CellInfo info;
+ pPage->xParseCell(pPage, pCell, &info);
+ if( info.nLocal<info.nPayload ){
+ if( pCell+info.nSize > pPage->aData+pPage->pBt->usableSize ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+ if( iFrom==get4byte(pCell+info.nSize-4) ){
+ put4byte(pCell+info.nSize-4, iTo);
+ break;
+ }
+ }
+ }else{
+ if( get4byte(pCell)==iFrom ){
+ put4byte(pCell, iTo);
+ break;
+ }
+ }
+ }
+
+ if( i==nCell ){
+ if( eType!=PTRMAP_BTREE ||
+ get4byte(&pPage->aData[pPage->hdrOffset+8])!=iFrom ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+ put4byte(&pPage->aData[pPage->hdrOffset+8], iTo);
+ }
+ }
+ return SQLITE_OK;
+}
+
+
+/*
+** Move the open database page pDbPage to location iFreePage in the
+** database. The pDbPage reference remains valid.
+**
+** The isCommit flag indicates that there is no need to remember that
+** the journal needs to be sync()ed before database page pDbPage->pgno
+** can be written to. The caller has already promised not to write to that
+** page.
+*/
+static int relocatePage(
+ BtShared *pBt, /* Btree */
+ MemPage *pDbPage, /* Open page to move */
+ u8 eType, /* Pointer map 'type' entry for pDbPage */
+ Pgno iPtrPage, /* Pointer map 'page-no' entry for pDbPage */
+ Pgno iFreePage, /* The location to move pDbPage to */
+ int isCommit /* isCommit flag passed to sqlite3PagerMovepage */
+){
+ MemPage *pPtrPage; /* The page that contains a pointer to pDbPage */
+ Pgno iDbPage = pDbPage->pgno;
+ Pager *pPager = pBt->pPager;
+ int rc;
+
+ assert( eType==PTRMAP_OVERFLOW2 || eType==PTRMAP_OVERFLOW1 ||
+ eType==PTRMAP_BTREE || eType==PTRMAP_ROOTPAGE );
+ assert( sqlite3_mutex_held(pBt->mutex) );
+ assert( pDbPage->pBt==pBt );
+
+ /* Move page iDbPage from its current location to page number iFreePage */
+ TRACE(("AUTOVACUUM: Moving %d to free page %d (ptr page %d type %d)\n",
+ iDbPage, iFreePage, iPtrPage, eType));
+ rc = sqlite3PagerMovepage(pPager, pDbPage->pDbPage, iFreePage, isCommit);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ pDbPage->pgno = iFreePage;
+
+ /* If pDbPage was a btree-page, then it may have child pages and/or cells
+ ** that point to overflow pages. The pointer map entries for all these
+ ** pages need to be changed.
+ **
+ ** If pDbPage is an overflow page, then the first 4 bytes may store a
+ ** pointer to a subsequent overflow page. If this is the case, then
+ ** the pointer map needs to be updated for the subsequent overflow page.
+ */
+ if( eType==PTRMAP_BTREE || eType==PTRMAP_ROOTPAGE ){
+ rc = setChildPtrmaps(pDbPage);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ }else{
+ Pgno nextOvfl = get4byte(pDbPage->aData);
+ if( nextOvfl!=0 ){
+ ptrmapPut(pBt, nextOvfl, PTRMAP_OVERFLOW2, iFreePage, &rc);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ }
+ }
+
+ /* Fix the database pointer on page iPtrPage that pointed at iDbPage so
+ ** that it points at iFreePage. Also fix the pointer map entry for
+ ** iPtrPage.
+ */
+ if( eType!=PTRMAP_ROOTPAGE ){
+ rc = btreeGetPage(pBt, iPtrPage, &pPtrPage, 0);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ rc = sqlite3PagerWrite(pPtrPage->pDbPage);
+ if( rc!=SQLITE_OK ){
+ releasePage(pPtrPage);
+ return rc;
+ }
+ rc = modifyPagePointer(pPtrPage, iDbPage, iFreePage, eType);
+ releasePage(pPtrPage);
+ if( rc==SQLITE_OK ){
+ ptrmapPut(pBt, iFreePage, eType, iPtrPage, &rc);
+ }
+ }
+ return rc;
+}
+
+/* Forward declaration required by incrVacuumStep(). */
+static int allocateBtreePage(BtShared *, MemPage **, Pgno *, Pgno, u8);
+
+/*
+** Perform a single step of an incremental-vacuum. If successful, return
+** SQLITE_OK. If there is no work to do (and therefore no point in
+** calling this function again), return SQLITE_DONE. Or, if an error
+** occurs, return some other error code.
+**
+** More specifically, this function attempts to re-organize the database so
+** that the last page of the file currently in use is no longer in use.
+**
+** Parameter nFin is the number of pages that this database would contain
+** if this function were called repeatedly until it returned SQLITE_DONE.
+**
+** If the bCommit parameter is non-zero, this function assumes that the
+** caller will keep calling incrVacuumStep() until it returns SQLITE_DONE
+** or an error. bCommit is passed true for an auto-vacuum-on-commit
+** operation, or false for an incremental vacuum.
+*/
+static int incrVacuumStep(BtShared *pBt, Pgno nFin, Pgno iLastPg, int bCommit){
+ Pgno nFreeList; /* Number of pages still on the free-list */
+ int rc;
+
+ assert( sqlite3_mutex_held(pBt->mutex) );
+ assert( iLastPg>nFin );
+
+ if( !PTRMAP_ISPAGE(pBt, iLastPg) && iLastPg!=PENDING_BYTE_PAGE(pBt) ){
+ u8 eType;
+ Pgno iPtrPage;
+
+ nFreeList = get4byte(&pBt->pPage1->aData[36]);
+ if( nFreeList==0 ){
+ return SQLITE_DONE;
+ }
+
+ rc = ptrmapGet(pBt, iLastPg, &eType, &iPtrPage);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ if( eType==PTRMAP_ROOTPAGE ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+
+ if( eType==PTRMAP_FREEPAGE ){
+ if( bCommit==0 ){
+        /* Remove the page from the file's free-list. This is not required
+ ** if bCommit is non-zero. In that case, the free-list will be
+ ** truncated to zero after this function returns, so it doesn't
+ ** matter if it still contains some garbage entries.
+ */
+ Pgno iFreePg;
+ MemPage *pFreePg;
+ rc = allocateBtreePage(pBt, &pFreePg, &iFreePg, iLastPg, BTALLOC_EXACT);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ assert( iFreePg==iLastPg );
+ releasePage(pFreePg);
+ }
+ } else {
+ Pgno iFreePg; /* Index of free page to move pLastPg to */
+ MemPage *pLastPg;
+ u8 eMode = BTALLOC_ANY; /* Mode parameter for allocateBtreePage() */
+ Pgno iNear = 0; /* nearby parameter for allocateBtreePage() */
+
+ rc = btreeGetPage(pBt, iLastPg, &pLastPg, 0);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+
+ /* If bCommit is zero, this loop runs exactly once and page pLastPg
+ ** is swapped with the first free page pulled off the free list.
+ **
+ ** On the other hand, if bCommit is greater than zero, then keep
+ ** looping until a free-page located within the first nFin pages
+ ** of the file is found.
+ */
+ if( bCommit==0 ){
+ eMode = BTALLOC_LE;
+ iNear = nFin;
+ }
+ do {
+ MemPage *pFreePg;
+ rc = allocateBtreePage(pBt, &pFreePg, &iFreePg, iNear, eMode);
+ if( rc!=SQLITE_OK ){
+ releasePage(pLastPg);
+ return rc;
+ }
+ releasePage(pFreePg);
+ }while( bCommit && iFreePg>nFin );
+ assert( iFreePg<iLastPg );
+
+ rc = relocatePage(pBt, pLastPg, eType, iPtrPage, iFreePg, bCommit);
+ releasePage(pLastPg);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ }
+ }
+
+ if( bCommit==0 ){
+ do {
+ iLastPg--;
+ }while( iLastPg==PENDING_BYTE_PAGE(pBt) || PTRMAP_ISPAGE(pBt, iLastPg) );
+ pBt->bDoTruncate = 1;
+ pBt->nPage = iLastPg;
+ }
+ return SQLITE_OK;
+}
+
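+/*
+** For illustration (hypothetical page numbers): suppose the file is 100
+** pages long, page 100 holds b-tree or overflow content, and page 37 is
+** on the free-list.  A single step relocates the content of page 100 into
+** page 37, fixes the pointer to it on its parent page and the relevant
+** pointer-map entries, and (for an incremental vacuum) reduces pBt->nPage
+** so that the file can later be truncated.
+*/
+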
+/*
+** The database opened by the first argument is an auto-vacuum database
+** nOrig pages in size containing nFree free pages. Return the expected
+** size of the database in pages following an auto-vacuum operation.
+*/
+static Pgno finalDbSize(BtShared *pBt, Pgno nOrig, Pgno nFree){
+ int nEntry; /* Number of entries on one ptrmap page */
+ Pgno nPtrmap; /* Number of PtrMap pages to be freed */
+ Pgno nFin; /* Return value */
+
+ nEntry = pBt->usableSize/5;
+ nPtrmap = (nFree-nOrig+PTRMAP_PAGENO(pBt, nOrig)+nEntry)/nEntry;
+ nFin = nOrig - nFree - nPtrmap;
+ if( nOrig>PENDING_BYTE_PAGE(pBt) && nFin<PENDING_BYTE_PAGE(pBt) ){
+ nFin--;
+ }
+ while( PTRMAP_ISPAGE(pBt, nFin) || nFin==PENDING_BYTE_PAGE(pBt) ){
+ nFin--;
+ }
+
+ return nFin;
+}
+
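+/*
+** Worked example (hypothetical figures): with a usable page size of 1024
+** bytes, nEntry is 1024/5 = 204 and the pointer-map pages fall on pages
+** 2, 207, 412, 617, 822, and so on.  For nOrig=1000 and nFree=300,
+** PTRMAP_PAGENO(pBt,1000)==822, so nPtrmap = (300-1000+822+204)/204 = 1
+** and the expected final size is nFin = 1000-300-1 = 699 pages.  Page 699
+** is neither a pointer-map page nor the pending-byte page, so no further
+** adjustment is needed.
+*/
+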
+/*
+** A write-transaction must be opened before calling this function.
+** It performs a single unit of work towards an incremental vacuum.
+**
+** If the incremental vacuum is finished after this function has run,
+** SQLITE_DONE is returned. If it is not finished, but no error occurred,
+** SQLITE_OK is returned. Otherwise an SQLite error code.
+*/
+SQLITE_PRIVATE int sqlite3BtreeIncrVacuum(Btree *p){
+ int rc;
+ BtShared *pBt = p->pBt;
+
+ sqlite3BtreeEnter(p);
+ assert( pBt->inTransaction==TRANS_WRITE && p->inTrans==TRANS_WRITE );
+ if( !pBt->autoVacuum ){
+ rc = SQLITE_DONE;
+ }else{
+ Pgno nOrig = btreePagecount(pBt);
+ Pgno nFree = get4byte(&pBt->pPage1->aData[36]);
+ Pgno nFin = finalDbSize(pBt, nOrig, nFree);
+
+ if( nOrig<nFin ){
+ rc = SQLITE_CORRUPT_BKPT;
+ }else if( nFree>0 ){
+ rc = saveAllCursors(pBt, 0, 0);
+ if( rc==SQLITE_OK ){
+ invalidateAllOverflowCache(pBt);
+ rc = incrVacuumStep(pBt, nFin, nOrig, 0);
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3PagerWrite(pBt->pPage1->pDbPage);
+ put4byte(&pBt->pPage1->aData[28], pBt->nPage);
+ }
+ }else{
+ rc = SQLITE_DONE;
+ }
+ }
+ sqlite3BtreeLeave(p);
+ return rc;
+}
+
+/*
+** This routine is called prior to sqlite3PagerCommit when a transaction
+** is committed for an auto-vacuum database.
+**
+** If SQLITE_OK is returned, then pBt->nPage is set to the number of pages
+** the database file should be truncated to during the commit process
+** (and pBt->bDoTruncate is set), i.e. the database has been reorganized
+** so that only the first pBt->nPage pages are in use.
+*/
+static int autoVacuumCommit(BtShared *pBt){
+ int rc = SQLITE_OK;
+ Pager *pPager = pBt->pPager;
+ VVA_ONLY( int nRef = sqlite3PagerRefcount(pPager); )
+
+ assert( sqlite3_mutex_held(pBt->mutex) );
+ invalidateAllOverflowCache(pBt);
+ assert(pBt->autoVacuum);
+ if( !pBt->incrVacuum ){
+ Pgno nFin; /* Number of pages in database after autovacuuming */
+ Pgno nFree; /* Number of pages on the freelist initially */
+ Pgno iFree; /* The next page to be freed */
+ Pgno nOrig; /* Database size before freeing */
+
+ nOrig = btreePagecount(pBt);
+ if( PTRMAP_ISPAGE(pBt, nOrig) || nOrig==PENDING_BYTE_PAGE(pBt) ){
+ /* It is not possible to create a database for which the final page
+ ** is either a pointer-map page or the pending-byte page. If one
+ ** is encountered, this indicates corruption.
+ */
+ return SQLITE_CORRUPT_BKPT;
+ }
+
+ nFree = get4byte(&pBt->pPage1->aData[36]);
+ nFin = finalDbSize(pBt, nOrig, nFree);
+ if( nFin>nOrig ) return SQLITE_CORRUPT_BKPT;
+ if( nFin<nOrig ){
+ rc = saveAllCursors(pBt, 0, 0);
+ }
+ for(iFree=nOrig; iFree>nFin && rc==SQLITE_OK; iFree--){
+ rc = incrVacuumStep(pBt, nFin, iFree, 1);
+ }
+ if( (rc==SQLITE_DONE || rc==SQLITE_OK) && nFree>0 ){
+ rc = sqlite3PagerWrite(pBt->pPage1->pDbPage);
+ put4byte(&pBt->pPage1->aData[32], 0);
+ put4byte(&pBt->pPage1->aData[36], 0);
+ put4byte(&pBt->pPage1->aData[28], nFin);
+ pBt->bDoTruncate = 1;
+ pBt->nPage = nFin;
+ }
+ if( rc!=SQLITE_OK ){
+ sqlite3PagerRollback(pPager);
+ }
+ }
+
+ assert( nRef>=sqlite3PagerRefcount(pPager) );
+ return rc;
+}
+
+#else /* ifndef SQLITE_OMIT_AUTOVACUUM */
+# define setChildPtrmaps(x) SQLITE_OK
+#endif
+
+/*
+** This routine does the first phase of a two-phase commit. This routine
+** causes a rollback journal to be created (if it does not already exist)
+** and populated with enough information so that if a power loss occurs
+** the database can be restored to its original state by playing back
+** the journal. Then the contents of the journal are flushed out to
+** the disk. After the journal is safely on oxide, the changes to the
+** database are written into the database file and flushed to oxide.
+** At the end of this call, the rollback journal still exists on the
+** disk and we are still holding all locks, so the transaction has not
+** committed. See sqlite3BtreeCommitPhaseTwo() for the second phase of the
+** commit process.
+**
+** This call is a no-op if no write-transaction is currently active on pBt.
+**
+** Otherwise, sync the database file for the btree pBt. zMaster points to
+** the name of a master journal file that should be written into the
+** individual journal file, or is NULL, indicating no master journal file
+** (single database transaction).
+**
+** When this is called, the master journal should already have been
+** created, populated with this journal pointer and synced to disk.
+**
+** Once this routine has returned, the only thing required to commit
+** the write-transaction for this database file is to delete the journal.
+*/
+SQLITE_PRIVATE int sqlite3BtreeCommitPhaseOne(Btree *p, const char *zMaster){
+ int rc = SQLITE_OK;
+ if( p->inTrans==TRANS_WRITE ){
+ BtShared *pBt = p->pBt;
+ sqlite3BtreeEnter(p);
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ if( pBt->autoVacuum ){
+ rc = autoVacuumCommit(pBt);
+ if( rc!=SQLITE_OK ){
+ sqlite3BtreeLeave(p);
+ return rc;
+ }
+ }
+ if( pBt->bDoTruncate ){
+ sqlite3PagerTruncateImage(pBt->pPager, pBt->nPage);
+ }
+#endif
+ rc = sqlite3PagerCommitPhaseOne(pBt->pPager, zMaster, 0);
+ sqlite3BtreeLeave(p);
+ }
+ return rc;
+}
+
+/*
+** This function is called from both BtreeCommitPhaseTwo() and BtreeRollback()
+** at the conclusion of a transaction.
+*/
+static void btreeEndTransaction(Btree *p){
+ BtShared *pBt = p->pBt;
+ sqlite3 *db = p->db;
+ assert( sqlite3BtreeHoldsMutex(p) );
+
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ pBt->bDoTruncate = 0;
+#endif
+ if( p->inTrans>TRANS_NONE && db->nVdbeRead>1 ){
+ /* If there are other active statements that belong to this database
+ ** handle, downgrade to a read-only transaction. The other statements
+ ** may still be reading from the database. */
+ downgradeAllSharedCacheTableLocks(p);
+ p->inTrans = TRANS_READ;
+ }else{
+ /* If the handle had any kind of transaction open, decrement the
+ ** transaction count of the shared btree. If the transaction count
+ ** reaches 0, set the shared state to TRANS_NONE. The unlockBtreeIfUnused()
+ ** call below will unlock the pager. */
+ if( p->inTrans!=TRANS_NONE ){
+ clearAllSharedCacheTableLocks(p);
+ pBt->nTransaction--;
+ if( 0==pBt->nTransaction ){
+ pBt->inTransaction = TRANS_NONE;
+ }
+ }
+
+ /* Set the current transaction state to TRANS_NONE and unlock the
+ ** pager if this call closed the only read or write transaction. */
+ p->inTrans = TRANS_NONE;
+ unlockBtreeIfUnused(pBt);
+ }
+
+ btreeIntegrity(p);
+}
+
+/*
+** Commit the transaction currently in progress.
+**
+** This routine implements the second phase of a 2-phase commit. The
+** sqlite3BtreeCommitPhaseOne() routine does the first phase and should
+** be invoked prior to calling this routine. The sqlite3BtreeCommitPhaseOne()
+** routine did all the work of writing information out to disk and flushing the
+** contents so that they are written onto the disk platter. All this
+** routine has to do is delete or truncate or zero the header in the
+** rollback journal (which causes the transaction to commit) and
+** drop locks.
+**
+** Normally, if an error occurs while the pager layer is attempting to
+** finalize the underlying journal file, this function returns an error and
+** the upper layer will attempt a rollback. However, if the second argument
+** is non-zero then this b-tree transaction is part of a multi-file
+** transaction. In this case, the transaction has already been committed
+** (by deleting a master journal file) and the caller will ignore this
+** function's return code. So, even if an error occurs in the pager layer,
+** reset the b-tree object's internal state to indicate that the write
+** transaction has been closed. This is quite safe, as the pager will have
+** transitioned to the error state.
+**
+** This will release the write lock on the database file. If there
+** are no active cursors, it also releases the read lock.
+*/
+SQLITE_PRIVATE int sqlite3BtreeCommitPhaseTwo(Btree *p, int bCleanup){
+
+ if( p->inTrans==TRANS_NONE ) return SQLITE_OK;
+ sqlite3BtreeEnter(p);
+ btreeIntegrity(p);
+
+  /* If the handle has a write-transaction open, commit the shared-btree's
+ ** transaction and set the shared state to TRANS_READ.
+ */
+ if( p->inTrans==TRANS_WRITE ){
+ int rc;
+ BtShared *pBt = p->pBt;
+ assert( pBt->inTransaction==TRANS_WRITE );
+ assert( pBt->nTransaction>0 );
+ rc = sqlite3PagerCommitPhaseTwo(pBt->pPager);
+ if( rc!=SQLITE_OK && bCleanup==0 ){
+ sqlite3BtreeLeave(p);
+ return rc;
+ }
+ p->iDataVersion--; /* Compensate for pPager->iDataVersion++; */
+ pBt->inTransaction = TRANS_READ;
+ btreeClearHasContent(pBt);
+ }
+
+ btreeEndTransaction(p);
+ sqlite3BtreeLeave(p);
+ return SQLITE_OK;
+}
+
+/*
+** Do both phases of a commit.
+*/
+SQLITE_PRIVATE int sqlite3BtreeCommit(Btree *p){
+ int rc;
+ sqlite3BtreeEnter(p);
+ rc = sqlite3BtreeCommitPhaseOne(p, 0);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3BtreeCommitPhaseTwo(p, 0);
+ }
+ sqlite3BtreeLeave(p);
+ return rc;
+}
+
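+/*
+** A rough sketch of the multi-database commit sequence implied by the two
+** routines above (caller-side pseudo-code, simplified; apBt[] and nDb are
+** hypothetical):
+**
+**   for(i=0; i<nDb; i++) rc = sqlite3BtreeCommitPhaseOne(apBt[i], zMaster);
+**   ... delete the master journal file (the moment of commit) ...
+**   for(i=0; i<nDb; i++) sqlite3BtreeCommitPhaseTwo(apBt[i], 1);
+**
+** For a single-database transaction, sqlite3BtreeCommit() above performs
+** both phases itself with no master journal (zMaster==0).
+*/
+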
+/*
+** This routine sets the state to CURSOR_FAULT and the error
+** code to errCode for every cursor on any BtShared that pBtree
+** references. Or if the writeOnly flag is set to 1, then only
+** trip write cursors and leave read cursors unchanged.
+**
+** Every cursor is a candidate to be tripped, including cursors
+** that belong to other database connections that happen to be
+** sharing the cache with pBtree.
+**
+** This routine gets called when a rollback occurs. If the writeOnly
+** flag is true, then only write-cursors need be tripped - read-only
+** cursors save their current positions so that they may continue
+** following the rollback. Or, if writeOnly is false, all cursors are
+** tripped. In general, writeOnly is false if the transaction being
+** rolled back modified the database schema. In this case b-tree root
+** pages may be moved or deleted from the database altogether, making
+** it unsafe for read cursors to continue.
+**
+** If the writeOnly flag is true and an error is encountered while
+** saving the current position of a read-only cursor, all cursors,
+** including all read-cursors, are tripped.
+**
+** SQLITE_OK is returned if successful, or if an error occurs while
+** saving a cursor position, an SQLite error code.
+*/
+SQLITE_PRIVATE int sqlite3BtreeTripAllCursors(Btree *pBtree, int errCode, int writeOnly){
+ BtCursor *p;
+ int rc = SQLITE_OK;
+
+ assert( (writeOnly==0 || writeOnly==1) && BTCF_WriteFlag==1 );
+ if( pBtree ){
+ sqlite3BtreeEnter(pBtree);
+ for(p=pBtree->pBt->pCursor; p; p=p->pNext){
+ int i;
+ if( writeOnly && (p->curFlags & BTCF_WriteFlag)==0 ){
+ if( p->eState==CURSOR_VALID || p->eState==CURSOR_SKIPNEXT ){
+ rc = saveCursorPosition(p);
+ if( rc!=SQLITE_OK ){
+ (void)sqlite3BtreeTripAllCursors(pBtree, rc, 0);
+ break;
+ }
+ }
+ }else{
+ sqlite3BtreeClearCursor(p);
+ p->eState = CURSOR_FAULT;
+ p->skipNext = errCode;
+ }
+ for(i=0; i<=p->iPage; i++){
+ releasePage(p->apPage[i]);
+ p->apPage[i] = 0;
+ }
+ }
+ sqlite3BtreeLeave(pBtree);
+ }
+ return rc;
+}
+
+/*
+** Rollback the transaction in progress.
+**
+** If tripCode is not SQLITE_OK then cursors will be invalidated (tripped).
+** Only write cursors are tripped if writeOnly is true but all cursors are
+** tripped if writeOnly is false. Any attempt to use
+** a tripped cursor will result in an error.
+**
+** This will release the write lock on the database file. If there
+** are no active cursors, it also releases the read lock.
+*/
+SQLITE_PRIVATE int sqlite3BtreeRollback(Btree *p, int tripCode, int writeOnly){
+ int rc;
+ BtShared *pBt = p->pBt;
+ MemPage *pPage1;
+
+ assert( writeOnly==1 || writeOnly==0 );
+ assert( tripCode==SQLITE_ABORT_ROLLBACK || tripCode==SQLITE_OK );
+ sqlite3BtreeEnter(p);
+ if( tripCode==SQLITE_OK ){
+ rc = tripCode = saveAllCursors(pBt, 0, 0);
+ if( rc ) writeOnly = 0;
+ }else{
+ rc = SQLITE_OK;
+ }
+ if( tripCode ){
+ int rc2 = sqlite3BtreeTripAllCursors(p, tripCode, writeOnly);
+ assert( rc==SQLITE_OK || (writeOnly==0 && rc2==SQLITE_OK) );
+ if( rc2!=SQLITE_OK ) rc = rc2;
+ }
+ btreeIntegrity(p);
+
+ if( p->inTrans==TRANS_WRITE ){
+ int rc2;
+
+ assert( TRANS_WRITE==pBt->inTransaction );
+ rc2 = sqlite3PagerRollback(pBt->pPager);
+ if( rc2!=SQLITE_OK ){
+ rc = rc2;
+ }
+
+ /* The rollback may have destroyed the pPage1->aData value. So
+ ** call btreeGetPage() on page 1 again to make
+ ** sure pPage1->aData is set correctly. */
+ if( btreeGetPage(pBt, 1, &pPage1, 0)==SQLITE_OK ){
+ int nPage = get4byte(28+(u8*)pPage1->aData);
+ testcase( nPage==0 );
+ if( nPage==0 ) sqlite3PagerPagecount(pBt->pPager, &nPage);
+ testcase( pBt->nPage!=nPage );
+ pBt->nPage = nPage;
+ releasePage(pPage1);
+ }
+ assert( countValidCursors(pBt, 1)==0 );
+ pBt->inTransaction = TRANS_READ;
+ btreeClearHasContent(pBt);
+ }
+
+ btreeEndTransaction(p);
+ sqlite3BtreeLeave(p);
+ return rc;
+}
+
+/*
+** Start a statement subtransaction. The subtransaction can be rolled
+** back independently of the main transaction. You must start a transaction
+** before starting a subtransaction. The subtransaction is ended automatically
+** if the main transaction commits or rolls back.
+**
+** Statement subtransactions are used around individual SQL statements
+** that are contained within a BEGIN...COMMIT block. If a constraint
+** error occurs within the statement, the effect of that one statement
+** can be rolled back without having to rollback the entire transaction.
+**
+** A statement sub-transaction is implemented as an anonymous savepoint. The
+** value passed as the second parameter is the total number of savepoints,
+** including the new anonymous savepoint, open on the B-Tree. i.e. if there
+** are no active savepoints and no other statement-transactions open,
+** iStatement is 1. This anonymous savepoint can be released or rolled back
+** using the sqlite3BtreeSavepoint() function.
+*/
+SQLITE_PRIVATE int sqlite3BtreeBeginStmt(Btree *p, int iStatement){
+ int rc;
+ BtShared *pBt = p->pBt;
+ sqlite3BtreeEnter(p);
+ assert( p->inTrans==TRANS_WRITE );
+ assert( (pBt->btsFlags & BTS_READ_ONLY)==0 );
+ assert( iStatement>0 );
+ assert( iStatement>p->db->nSavepoint );
+ assert( pBt->inTransaction==TRANS_WRITE );
+ /* At the pager level, a statement transaction is a savepoint with
+ ** an index greater than all savepoints created explicitly using
+ ** SQL statements. It is illegal to open, release or rollback any
+ ** such savepoints while the statement transaction savepoint is active.
+ */
+ rc = sqlite3PagerOpenSavepoint(pBt->pPager, iStatement);
+ sqlite3BtreeLeave(p);
+ return rc;
+}
+
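+/*
+** For example (hypothetical SQL): inside "BEGIN; SAVEPOINT s1; INSERT ...",
+** an INSERT that requires a statement transaction would open its anonymous
+** savepoint with iStatement==2 (one named savepoint plus the new anonymous
+** one), per the numbering described above.
+*/
+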
+/*
+** The second argument to this function, op, is always SAVEPOINT_ROLLBACK
+** or SAVEPOINT_RELEASE. This function either releases or rolls back the
+** savepoint identified by parameter iSavepoint, depending on the value
+** of op.
+**
+** Normally, iSavepoint is greater than or equal to zero. However, if op is
+** SAVEPOINT_ROLLBACK, then iSavepoint may also be -1. In this case the
+** contents of the entire transaction are rolled back. This is different
+** from a normal transaction rollback, as no locks are released and the
+** transaction remains open.
+*/
+SQLITE_PRIVATE int sqlite3BtreeSavepoint(Btree *p, int op, int iSavepoint){
+ int rc = SQLITE_OK;
+ if( p && p->inTrans==TRANS_WRITE ){
+ BtShared *pBt = p->pBt;
+ assert( op==SAVEPOINT_RELEASE || op==SAVEPOINT_ROLLBACK );
+ assert( iSavepoint>=0 || (iSavepoint==-1 && op==SAVEPOINT_ROLLBACK) );
+ sqlite3BtreeEnter(p);
+ if( op==SAVEPOINT_ROLLBACK ){
+ rc = saveAllCursors(pBt, 0, 0);
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3PagerSavepoint(pBt->pPager, op, iSavepoint);
+ }
+ if( rc==SQLITE_OK ){
+ if( iSavepoint<0 && (pBt->btsFlags & BTS_INITIALLY_EMPTY)!=0 ){
+ pBt->nPage = 0;
+ }
+ rc = newDatabase(pBt);
+ pBt->nPage = get4byte(28 + pBt->pPage1->aData);
+
+ /* The database size was written into the offset 28 of the header
+ ** when the transaction started, so we know that the value at offset
+ ** 28 is nonzero. */
+ assert( pBt->nPage>0 );
+ }
+ sqlite3BtreeLeave(p);
+ }
+ return rc;
+}
+
+/*
+** Create a new cursor for the BTree whose root is on the page
+** iTable. If a read-only cursor is requested, it is assumed that
+** the caller already has at least a read-only transaction open
+** on the database. If a write-cursor is requested, then
+** the caller is assumed to have an open write transaction.
+**
+** If the BTREE_WRCSR bit of wrFlag is clear, then the cursor can only
+** be used for reading. If the BTREE_WRCSR bit is set, then the cursor
+** can be used for reading or for writing if other conditions for writing
+** are also met. These are the conditions that must be met in order
+** for writing to be allowed:
+**
+** 1: The cursor must have been opened with wrFlag containing BTREE_WRCSR
+**
+** 2: Other database connections that share the same pager cache
+** but which are not in the READ_UNCOMMITTED state may not have
+** cursors open with wrFlag==0 on the same table. Otherwise
+** the changes made by this write cursor would be visible to
+** the read cursors in the other database connection.
+**
+** 3: The database must be writable (not on read-only media)
+**
+** 4: There must be an active transaction.
+**
+** The BTREE_FORDELETE bit of wrFlag may optionally be set if BTREE_WRCSR
+** is set. If FORDELETE is set, that is a hint to the implementation that
+** this cursor will only be used to seek to and delete entries of an index
+** as part of a larger DELETE statement. The FORDELETE hint is not used by
+** this implementation. But in a hypothetical alternative storage engine
+** in which index entries are automatically deleted when corresponding table
+** rows are deleted, the FORDELETE flag is a hint that all SEEK and DELETE
+** operations on this cursor can be no-ops and all READ operations can
+** return a null row (2-bytes: 0x01 0x00).
+**
+** No checking is done to make sure that page iTable really is the
+** root page of a b-tree. If it is not, then the cursor acquired
+** will not work correctly.
+**
+** It is assumed that sqlite3BtreeCursorZero() has been called
+** on pCur to initialize the memory space prior to invoking this routine.
+*/
+static int btreeCursor(
+ Btree *p, /* The btree */
+ int iTable, /* Root page of table to open */
+ int wrFlag, /* 1 to write. 0 read-only */
+ struct KeyInfo *pKeyInfo, /* First arg to comparison function */
+ BtCursor *pCur /* Space for new cursor */
+){
+ BtShared *pBt = p->pBt; /* Shared b-tree handle */
+  BtCursor *pX;                          /* Looping over all other cursors */
+
+ assert( sqlite3BtreeHoldsMutex(p) );
+ assert( wrFlag==0
+ || wrFlag==BTREE_WRCSR
+ || wrFlag==(BTREE_WRCSR|BTREE_FORDELETE)
+ );
+
+ /* The following assert statements verify that if this is a sharable
+ ** b-tree database, the connection is holding the required table locks,
+ ** and that no other connection has any open cursor that conflicts with
+ ** this lock. */
+ assert( hasSharedCacheTableLock(p, iTable, pKeyInfo!=0, (wrFlag?2:1)) );
+ assert( wrFlag==0 || !hasReadConflicts(p, iTable) );
+
+ /* Assert that the caller has opened the required transaction. */
+ assert( p->inTrans>TRANS_NONE );
+ assert( wrFlag==0 || p->inTrans==TRANS_WRITE );
+ assert( pBt->pPage1 && pBt->pPage1->aData );
+ assert( wrFlag==0 || (pBt->btsFlags & BTS_READ_ONLY)==0 );
+
+ if( wrFlag ){
+ allocateTempSpace(pBt);
+ if( pBt->pTmpSpace==0 ) return SQLITE_NOMEM_BKPT;
+ }
+ if( iTable==1 && btreePagecount(pBt)==0 ){
+ assert( wrFlag==0 );
+ iTable = 0;
+ }
+
+ /* Now that no other errors can occur, finish filling in the BtCursor
+ ** variables and link the cursor into the BtShared list. */
+ pCur->pgnoRoot = (Pgno)iTable;
+ pCur->iPage = -1;
+ pCur->pKeyInfo = pKeyInfo;
+ pCur->pBtree = p;
+ pCur->pBt = pBt;
+ pCur->curFlags = wrFlag ? BTCF_WriteFlag : 0;
+ pCur->curPagerFlags = wrFlag ? 0 : PAGER_GET_READONLY;
+ /* If there are two or more cursors on the same btree, then all such
+ ** cursors *must* have the BTCF_Multiple flag set. */
+ for(pX=pBt->pCursor; pX; pX=pX->pNext){
+ if( pX->pgnoRoot==(Pgno)iTable ){
+ pX->curFlags |= BTCF_Multiple;
+ pCur->curFlags |= BTCF_Multiple;
+ }
+ }
+ pCur->pNext = pBt->pCursor;
+ pBt->pCursor = pCur;
+ pCur->eState = CURSOR_INVALID;
+ return SQLITE_OK;
+}
+SQLITE_PRIVATE int sqlite3BtreeCursor(
+ Btree *p, /* The btree */
+ int iTable, /* Root page of table to open */
+ int wrFlag, /* 1 to write. 0 read-only */
+ struct KeyInfo *pKeyInfo, /* First arg to xCompare() */
+ BtCursor *pCur /* Write new cursor here */
+){
+ int rc;
+ if( iTable<1 ){
+ rc = SQLITE_CORRUPT_BKPT;
+ }else{
+ sqlite3BtreeEnter(p);
+ rc = btreeCursor(p, iTable, wrFlag, pKeyInfo, pCur);
+ sqlite3BtreeLeave(p);
+ }
+ return rc;
+}
+
+/*
+** Return the size of a BtCursor object in bytes.
+**
+** This interface is needed so that users of cursors can preallocate
+** sufficient storage to hold a cursor. The BtCursor object is opaque
+** to users so they cannot do the sizeof() themselves - they must call
+** this routine.
+*/
+SQLITE_PRIVATE int sqlite3BtreeCursorSize(void){
+ return ROUND8(sizeof(BtCursor));
+}
+
+/*
+** Initialize memory that will be converted into a BtCursor object.
+**
+** The simple approach here would be to memset() the entire object
+** to zero. But it turns out that the apPage[] and aiIdx[] arrays
+** do not need to be zeroed and they are large, so we can save a lot
+** of run-time by skipping the initialization of those elements.
+*/
+SQLITE_PRIVATE void sqlite3BtreeCursorZero(BtCursor *p){
+ memset(p, 0, offsetof(BtCursor, iPage));
+}
+
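+/*
+** A minimal sketch of how a caller might use the two routines above
+** together with sqlite3BtreeCursor() (hypothetical caller code, error
+** handling omitted):
+**
+**   BtCursor *pCur = sqlite3_malloc64( sqlite3BtreeCursorSize() );
+**   sqlite3BtreeCursorZero(pCur);
+**   rc = sqlite3BtreeCursor(pBtree, iRootPg, BTREE_WRCSR, pKeyInfo, pCur);
+**   ...
+**   sqlite3BtreeCloseCursor(pCur);
+**   sqlite3_free(pCur);
+*/
+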
+/*
+** Close a cursor. The read lock on the database file is released
+** when the last cursor is closed.
+*/
+SQLITE_PRIVATE int sqlite3BtreeCloseCursor(BtCursor *pCur){
+ Btree *pBtree = pCur->pBtree;
+ if( pBtree ){
+ int i;
+ BtShared *pBt = pCur->pBt;
+ sqlite3BtreeEnter(pBtree);
+ sqlite3BtreeClearCursor(pCur);
+ assert( pBt->pCursor!=0 );
+ if( pBt->pCursor==pCur ){
+ pBt->pCursor = pCur->pNext;
+ }else{
+ BtCursor *pPrev = pBt->pCursor;
+ do{
+ if( pPrev->pNext==pCur ){
+ pPrev->pNext = pCur->pNext;
+ break;
+ }
+ pPrev = pPrev->pNext;
+ }while( ALWAYS(pPrev) );
+ }
+ for(i=0; i<=pCur->iPage; i++){
+ releasePage(pCur->apPage[i]);
+ }
+ unlockBtreeIfUnused(pBt);
+ sqlite3_free(pCur->aOverflow);
+ /* sqlite3_free(pCur); */
+ sqlite3BtreeLeave(pBtree);
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Make sure the BtCursor* given in the argument has a valid
+** BtCursor.info structure. If it is not already valid, call
+** btreeParseCell() to fill it in.
+**
+** BtCursor.info is a cache of the information in the current cell.
+** Using this cache reduces the number of calls to btreeParseCell().
+*/
+#ifndef NDEBUG
+ static void assertCellInfo(BtCursor *pCur){
+ CellInfo info;
+ int iPage = pCur->iPage;
+ memset(&info, 0, sizeof(info));
+ btreeParseCell(pCur->apPage[iPage], pCur->aiIdx[iPage], &info);
+ assert( CORRUPT_DB || memcmp(&info, &pCur->info, sizeof(info))==0 );
+ }
+#else
+ #define assertCellInfo(x)
+#endif
+static SQLITE_NOINLINE void getCellInfo(BtCursor *pCur){
+ if( pCur->info.nSize==0 ){
+ int iPage = pCur->iPage;
+ pCur->curFlags |= BTCF_ValidNKey;
+ btreeParseCell(pCur->apPage[iPage],pCur->aiIdx[iPage],&pCur->info);
+ }else{
+ assertCellInfo(pCur);
+ }
+}
+
+#ifndef NDEBUG /* The next routine used only within assert() statements */
+/*
+** Return true if the given BtCursor is valid. A valid cursor is one
+** that is currently pointing to a row in a (non-empty) table.
+** This verification routine is used only within assert() statements.
+*/
+SQLITE_PRIVATE int sqlite3BtreeCursorIsValid(BtCursor *pCur){
+ return pCur && pCur->eState==CURSOR_VALID;
+}
+#endif /* NDEBUG */
+SQLITE_PRIVATE int sqlite3BtreeCursorIsValidNN(BtCursor *pCur){
+ assert( pCur!=0 );
+ return pCur->eState==CURSOR_VALID;
+}
+
+/*
+** Return the value of the integer key or "rowid" for a table btree.
+** This routine is only valid for a cursor that is pointing into an
+** ordinary table btree. If the cursor points to an index btree or
+** is invalid, the result of this routine is undefined.
+*/
+SQLITE_PRIVATE i64 sqlite3BtreeIntegerKey(BtCursor *pCur){
+ assert( cursorHoldsMutex(pCur) );
+ assert( pCur->eState==CURSOR_VALID );
+ assert( pCur->curIntKey );
+ getCellInfo(pCur);
+ return pCur->info.nKey;
+}
+
+/*
+** Return the number of bytes of payload for the entry that pCur is
+** currently pointing to. For table btrees, this will be the amount
+** of data. For index btrees, this will be the size of the key.
+**
+** The caller must guarantee that the cursor is pointing to a non-NULL
+** valid entry. In other words, the calling procedure must guarantee
+** that the cursor has Cursor.eState==CURSOR_VALID.
+*/
+SQLITE_PRIVATE u32 sqlite3BtreePayloadSize(BtCursor *pCur){
+ assert( cursorHoldsMutex(pCur) );
+ assert( pCur->eState==CURSOR_VALID );
+ getCellInfo(pCur);
+ return pCur->info.nPayload;
+}
+
+/*
+** Given the page number of an overflow page in the database (parameter
+** ovfl), this function finds the page number of the next page in the
+** linked list of overflow pages. If possible, it uses the auto-vacuum
+** pointer-map data instead of reading the content of page ovfl to do so.
+**
+** If an error occurs an SQLite error code is returned. Otherwise:
+**
+** The page number of the next overflow page in the linked list is
+** written to *pPgnoNext. If page ovfl is the last page in its linked
+** list, *pPgnoNext is set to zero.
+**
+** If ppPage is not NULL, and a reference to the MemPage object corresponding
+** to page number pOvfl was obtained, then *ppPage is set to point to that
+** reference. It is the responsibility of the caller to call releasePage()
+** on *ppPage to free the reference.  If no reference was obtained (because
+** the pointer-map was used to obtain the value for *pPgnoNext), then
+** *ppPage is set to zero.
+*/
+static int getOverflowPage(
+ BtShared *pBt, /* The database file */
+ Pgno ovfl, /* Current overflow page number */
+ MemPage **ppPage, /* OUT: MemPage handle (may be NULL) */
+ Pgno *pPgnoNext /* OUT: Next overflow page number */
+){
+ Pgno next = 0;
+ MemPage *pPage = 0;
+ int rc = SQLITE_OK;
+
+ assert( sqlite3_mutex_held(pBt->mutex) );
+ assert(pPgnoNext);
+
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ /* Try to find the next page in the overflow list using the
+ ** autovacuum pointer-map pages. Guess that the next page in
+ ** the overflow list is page number (ovfl+1). If that guess turns
+ ** out to be wrong, fall back to loading the data of page
+ ** number ovfl to determine the next page number.
+ */
+ if( pBt->autoVacuum ){
+ Pgno pgno;
+ Pgno iGuess = ovfl+1;
+ u8 eType;
+
+ while( PTRMAP_ISPAGE(pBt, iGuess) || iGuess==PENDING_BYTE_PAGE(pBt) ){
+ iGuess++;
+ }
+
+ if( iGuess<=btreePagecount(pBt) ){
+ rc = ptrmapGet(pBt, iGuess, &eType, &pgno);
+ if( rc==SQLITE_OK && eType==PTRMAP_OVERFLOW2 && pgno==ovfl ){
+ next = iGuess;
+ rc = SQLITE_DONE;
+ }
+ }
+ }
+#endif
+
+ assert( next==0 || rc==SQLITE_DONE );
+ if( rc==SQLITE_OK ){
+ rc = btreeGetPage(pBt, ovfl, &pPage, (ppPage==0) ? PAGER_GET_READONLY : 0);
+ assert( rc==SQLITE_OK || pPage==0 );
+ if( rc==SQLITE_OK ){
+ next = get4byte(pPage->aData);
+ }
+ }
+
+ *pPgnoNext = next;
+ if( ppPage ){
+ *ppPage = pPage;
+ }else{
+ releasePage(pPage);
+ }
+ return (rc==SQLITE_DONE ? SQLITE_OK : rc);
+}
+
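+/*
+** For illustration (hypothetical page numbers): when ovfl==17, the initial
+** guess for the next page is 18 (skipping over any pointer-map or
+** pending-byte page).  If the pointer-map entry for page 18 reads
+** (PTRMAP_OVERFLOW2, 17), then page 18 must be the next page in the chain
+** and page 17 itself never has to be read from disk.
+*/
+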
+/*
+** Copy data from a buffer to a page, or from a page to a buffer.
+**
+** pPayload is a pointer to data stored on database page pDbPage.
+** If argument eOp is false, then nByte bytes of data are copied
+** from pPayload to the buffer pointed at by pBuf. If eOp is true,
+** then sqlite3PagerWrite() is called on pDbPage and nByte bytes
+** of data are copied from the buffer pBuf to pPayload.
+**
+** SQLITE_OK is returned on success, otherwise an error code.
+*/
+static int copyPayload(
+ void *pPayload, /* Pointer to page data */
+ void *pBuf, /* Pointer to buffer */
+ int nByte, /* Number of bytes to copy */
+ int eOp, /* 0 -> copy from page, 1 -> copy to page */
+ DbPage *pDbPage /* Page containing pPayload */
+){
+ if( eOp ){
+ /* Copy data from buffer to page (a write operation) */
+ int rc = sqlite3PagerWrite(pDbPage);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ memcpy(pPayload, pBuf, nByte);
+ }else{
+ /* Copy data from page to buffer (a read operation) */
+ memcpy(pBuf, pPayload, nByte);
+ }
+ return SQLITE_OK;
+}
+
+/*
+** This function is used to read or overwrite payload information
+** for the entry that the pCur cursor is pointing to. The eOp
+** argument is interpreted as follows:
+**
+** 0: The operation is a read. Populate the overflow cache.
+** 1: The operation is a write. Populate the overflow cache.
+**
+** A total of "amt" bytes are read or written beginning at "offset".
+** Data is read to or from the buffer pBuf.
+**
+** The content being read or written might appear on the main page
+** or be scattered out on multiple overflow pages.
+**
+** If the current cursor entry uses one or more overflow pages
+** this function may allocate space for and lazily populate
+** the overflow page-list cache array (BtCursor.aOverflow).
+** Subsequent calls use this cache to make seeking to the supplied offset
+** more efficient.
+**
+** Once an overflow page-list cache has been allocated, it must be
+** invalidated if some other cursor writes to the same table, or if
+** the cursor is moved to a different row. Additionally, in auto-vacuum
+** mode, the following events may invalidate an overflow page-list cache.
+**
+** * An incremental vacuum,
+** * A commit in auto_vacuum="full" mode,
+** * Creating a table (may require moving an overflow page).
+*/
+static int accessPayload(
+ BtCursor *pCur, /* Cursor pointing to entry to read from */
+ u32 offset, /* Begin reading this far into payload */
+ u32 amt, /* Read this many bytes */
+ unsigned char *pBuf, /* Write the bytes into this buffer */
+ int eOp /* zero to read. non-zero to write. */
+){
+ unsigned char *aPayload;
+ int rc = SQLITE_OK;
+ int iIdx = 0;
+ MemPage *pPage = pCur->apPage[pCur->iPage]; /* Btree page of current entry */
+ BtShared *pBt = pCur->pBt; /* Btree this cursor belongs to */
+#ifdef SQLITE_DIRECT_OVERFLOW_READ
+ unsigned char * const pBufStart = pBuf; /* Start of original out buffer */
+#endif
+
+ assert( pPage );
+ assert( eOp==0 || eOp==1 );
+ assert( pCur->eState==CURSOR_VALID );
+ assert( pCur->aiIdx[pCur->iPage]<pPage->nCell );
+ assert( cursorHoldsMutex(pCur) );
+
+ getCellInfo(pCur);
+ aPayload = pCur->info.pPayload;
+ assert( offset+amt <= pCur->info.nPayload );
+
+ assert( aPayload > pPage->aData );
+ if( (uptr)(aPayload - pPage->aData) > (pBt->usableSize - pCur->info.nLocal) ){
+ /* Trying to read or write past the end of the data is an error. The
+ ** conditional above is really:
+ ** &aPayload[pCur->info.nLocal] > &pPage->aData[pBt->usableSize]
+ ** but is recast into its current form to avoid integer overflow problems
+ */
+ return SQLITE_CORRUPT_BKPT;
+ }
+
+ /* Check if data must be read/written to/from the btree page itself. */
+ if( offset<pCur->info.nLocal ){
+ int a = amt;
+ if( a+offset>pCur->info.nLocal ){
+ a = pCur->info.nLocal - offset;
+ }
+ rc = copyPayload(&aPayload[offset], pBuf, a, eOp, pPage->pDbPage);
+ offset = 0;
+ pBuf += a;
+ amt -= a;
+ }else{
+ offset -= pCur->info.nLocal;
+ }
+
+
+ if( rc==SQLITE_OK && amt>0 ){
+ const u32 ovflSize = pBt->usableSize - 4; /* Bytes content per ovfl page */
+ Pgno nextPage;
+
+ nextPage = get4byte(&aPayload[pCur->info.nLocal]);
+
+ /* If the BtCursor.aOverflow[] has not been allocated, allocate it now.
+ **
+ ** The aOverflow[] array is sized at one entry for each overflow page
+ ** in the overflow chain. The page number of the first overflow page is
+ ** stored in aOverflow[0], etc. A value of 0 in the aOverflow[] array
+ ** means "not yet known" (the cache is lazily populated).
+ */
+ if( (pCur->curFlags & BTCF_ValidOvfl)==0 ){
+ int nOvfl = (pCur->info.nPayload-pCur->info.nLocal+ovflSize-1)/ovflSize;
+ if( nOvfl>pCur->nOvflAlloc ){
+ Pgno *aNew = (Pgno*)sqlite3Realloc(
+ pCur->aOverflow, nOvfl*2*sizeof(Pgno)
+ );
+ if( aNew==0 ){
+ return SQLITE_NOMEM_BKPT;
+ }else{
+ pCur->nOvflAlloc = nOvfl*2;
+ pCur->aOverflow = aNew;
+ }
+ }
+ memset(pCur->aOverflow, 0, nOvfl*sizeof(Pgno));
+ pCur->curFlags |= BTCF_ValidOvfl;
+ }else{
+ /* If the overflow page-list cache has been allocated and the
+ ** entry for the first required overflow page is valid, skip
+ ** directly to it.
+ */
+ if( pCur->aOverflow[offset/ovflSize] ){
+ iIdx = (offset/ovflSize);
+ nextPage = pCur->aOverflow[iIdx];
+ offset = (offset%ovflSize);
+ }
+ }
+
+ assert( rc==SQLITE_OK && amt>0 );
+ while( nextPage ){
+ /* If required, populate the overflow page-list cache. */
+ assert( pCur->aOverflow[iIdx]==0
+ || pCur->aOverflow[iIdx]==nextPage
+ || CORRUPT_DB );
+ pCur->aOverflow[iIdx] = nextPage;
+
+ if( offset>=ovflSize ){
+ /* The only reason to read this page is to obtain the page
+ ** number for the next page in the overflow chain. The page
+ ** data is not required. So first try to lookup the overflow
+ ** page-list cache, if any, then fall back to the getOverflowPage()
+ ** function.
+ */
+ assert( pCur->curFlags & BTCF_ValidOvfl );
+ assert( pCur->pBtree->db==pBt->db );
+ if( pCur->aOverflow[iIdx+1] ){
+ nextPage = pCur->aOverflow[iIdx+1];
+ }else{
+ rc = getOverflowPage(pBt, nextPage, 0, &nextPage);
+ }
+ offset -= ovflSize;
+ }else{
+ /* Need to read this page properly. It contains some of the
+ ** range of data that is being read (eOp==0) or written (eOp!=0).
+ */
+#ifdef SQLITE_DIRECT_OVERFLOW_READ
+ sqlite3_file *fd; /* File from which to do direct overflow read */
+#endif
+ int a = amt;
+ if( a + offset > ovflSize ){
+ a = ovflSize - offset;
+ }
+
+#ifdef SQLITE_DIRECT_OVERFLOW_READ
+ /* If all the following are true:
+ **
+ ** 1) this is a read operation, and
+ ** 2) data is required from the start of this overflow page, and
+ ** 3) there is no open write-transaction, and
+ ** 4) the database is file-backed, and
+ ** 5) the page is not in the WAL file
+ ** 6) at least 4 bytes have already been read into the output buffer
+ **
+ ** then data can be read directly from the database file into the
+ ** output buffer, bypassing the page-cache altogether. This speeds
+ ** up loading large records that span many overflow pages.
+ */
+ if( eOp==0 /* (1) */
+ && offset==0 /* (2) */
+ && pBt->inTransaction==TRANS_READ /* (3) */
+ && (fd = sqlite3PagerFile(pBt->pPager))->pMethods /* (4) */
+ && 0==sqlite3PagerUseWal(pBt->pPager, nextPage) /* (5) */
+ && &pBuf[-4]>=pBufStart /* (6) */
+ ){
+ u8 aSave[4];
+ u8 *aWrite = &pBuf[-4];
+ assert( aWrite>=pBufStart ); /* due to (6) */
+ memcpy(aSave, aWrite, 4);
+ rc = sqlite3OsRead(fd, aWrite, a+4, (i64)pBt->pageSize*(nextPage-1));
+ nextPage = get4byte(aWrite);
+ memcpy(aWrite, aSave, 4);
+ }else
+#endif
+
+ {
+ DbPage *pDbPage;
+ rc = sqlite3PagerGet(pBt->pPager, nextPage, &pDbPage,
+ (eOp==0 ? PAGER_GET_READONLY : 0)
+ );
+ if( rc==SQLITE_OK ){
+ aPayload = sqlite3PagerGetData(pDbPage);
+ nextPage = get4byte(aPayload);
+ rc = copyPayload(&aPayload[offset+4], pBuf, a, eOp, pDbPage);
+ sqlite3PagerUnref(pDbPage);
+ offset = 0;
+ }
+ }
+ amt -= a;
+ if( amt==0 ) return rc;
+ pBuf += a;
+ }
+ if( rc ) break;
+ iIdx++;
+ }
+ }
+
+ if( rc==SQLITE_OK && amt>0 ){
+ return SQLITE_CORRUPT_BKPT; /* Overflow chain ends prematurely */
+ }
+ return rc;
+}
+
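+/*
+** To illustrate the aOverflow[] addressing used above (hypothetical
+** figures): with a usable page size of 1024 bytes each overflow page
+** carries ovflSize = 1020 bytes of content, so once the local portion of
+** the payload has been consumed, an offset of 2500 bytes into the overflow
+** chain is found on page aOverflow[2500/1020] = aOverflow[2], at byte
+** 2500%1020 = 460 of that page's content area.
+*/
+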
+/*
+** Read part of the payload for the row at which cursor pCur is currently
+** pointing. "amt" bytes will be transferred into pBuf[]. The transfer
+** begins at "offset".
+**
+** pCur can be pointing to either a table or an index b-tree.
+** If pointing to a table btree, then the content section is read. If
+** pCur is pointing to an index b-tree then the key section is read.
+**
+** For sqlite3BtreePayload(), the caller must ensure that pCur is pointing
+** to a valid row in the table. For sqlite3BtreePayloadChecked(), the
+** cursor might be invalid or might need to be restored before being read.
+**
+** Return SQLITE_OK on success or an error code if anything goes
+** wrong. An error is returned if "offset+amt" is larger than
+** the available payload.
+*/
+SQLITE_PRIVATE int sqlite3BtreePayload(BtCursor *pCur, u32 offset, u32 amt, void *pBuf){
+ assert( cursorHoldsMutex(pCur) );
+ assert( pCur->eState==CURSOR_VALID );
+ assert( pCur->iPage>=0 && pCur->apPage[pCur->iPage] );
+ assert( pCur->aiIdx[pCur->iPage]<pCur->apPage[pCur->iPage]->nCell );
+ return accessPayload(pCur, offset, amt, (unsigned char*)pBuf, 0);
+}
+
+/*
+** This variant of sqlite3BtreePayload() works even if the cursor is not
+** in the CURSOR_VALID state. It is only used by the sqlite3_blob_read()
+** interface.
+*/
+#ifndef SQLITE_OMIT_INCRBLOB
+static SQLITE_NOINLINE int accessPayloadChecked(
+ BtCursor *pCur,
+ u32 offset,
+ u32 amt,
+ void *pBuf
+){
+ int rc;
+ if ( pCur->eState==CURSOR_INVALID ){
+ return SQLITE_ABORT;
+ }
+ assert( cursorOwnsBtShared(pCur) );
+ rc = btreeRestoreCursorPosition(pCur);
+ return rc ? rc : accessPayload(pCur, offset, amt, pBuf, 0);
+}
+SQLITE_PRIVATE int sqlite3BtreePayloadChecked(BtCursor *pCur, u32 offset, u32 amt, void *pBuf){
+ if( pCur->eState==CURSOR_VALID ){
+ assert( cursorOwnsBtShared(pCur) );
+ return accessPayload(pCur, offset, amt, pBuf, 0);
+ }else{
+ return accessPayloadChecked(pCur, offset, amt, pBuf);
+ }
+}
+#endif /* SQLITE_OMIT_INCRBLOB */
+
+/*
+** Return a pointer to payload information from the entry that the
+** pCur cursor is pointing to. The pointer is to the beginning of
+** the key for index btrees (pPage->intKey==0) and to the data for
+** table btrees (pPage->intKey==1). The number of bytes of available
+** key/data is written into *pAmt. If *pAmt==0, then the value
+** returned will not be a valid pointer.
+**
+** This routine is an optimization. It is common for the entire key
+** and data to fit on the local page and for there to be no overflow
+** pages. When that is so, this routine can be used to access the
+** key and data without making a copy. If the key and/or data spills
+** onto overflow pages, then accessPayload() must be used to reassemble
+** the key/data and copy it into a preallocated buffer.
+**
+** The pointer returned by this routine looks directly into the cached
+** page of the database. The data might change or move the next time
+** any btree routine is called.
+*/
+static const void *fetchPayload(
+ BtCursor *pCur, /* Cursor pointing to entry to read from */
+ u32 *pAmt /* Write the number of available bytes here */
+){
+ u32 amt;
+ assert( pCur!=0 && pCur->iPage>=0 && pCur->apPage[pCur->iPage]);
+ assert( pCur->eState==CURSOR_VALID );
+ assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) );
+ assert( cursorOwnsBtShared(pCur) );
+ assert( pCur->aiIdx[pCur->iPage]<pCur->apPage[pCur->iPage]->nCell );
+ assert( pCur->info.nSize>0 );
+ assert( pCur->info.pPayload>pCur->apPage[pCur->iPage]->aData || CORRUPT_DB );
+ assert( pCur->info.pPayload<pCur->apPage[pCur->iPage]->aDataEnd ||CORRUPT_DB);
+ amt = (int)(pCur->apPage[pCur->iPage]->aDataEnd - pCur->info.pPayload);
+ if( pCur->info.nLocal<amt ) amt = pCur->info.nLocal;
+ *pAmt = amt;
+ return (void*)pCur->info.pPayload;
+}
+
+
+/*
+** For the entry that cursor pCur is pointing to, return as
+** many bytes of the key or data as are available on the local
+** b-tree page. Write the number of available bytes into *pAmt.
+**
+** The pointer returned is ephemeral. The key/data may move
+** or be destroyed on the next call to any Btree routine,
+** including calls from other threads against the same cache.
+** Hence, a mutex on the BtShared should be held prior to calling
+** this routine.
+**
+** This routine is used to get quick access to key and data
+** in the common case where no overflow pages are used.
+*/
+SQLITE_PRIVATE const void *sqlite3BtreePayloadFetch(BtCursor *pCur, u32 *pAmt){
+ return fetchPayload(pCur, pAmt);
+}
+
+
+/*
+** Move the cursor down to a new child page. The newPgno argument is the
+** page number of the child page to move to.
+**
+** This function returns SQLITE_CORRUPT if the page-header flags field of
+** the new child page does not match the flags field of the parent (i.e.
+** if an intkey page appears to be the parent of a non-intkey page, or
+** vice-versa).
+*/
+static int moveToChild(BtCursor *pCur, u32 newPgno){
+ BtShared *pBt = pCur->pBt;
+
+ assert( cursorOwnsBtShared(pCur) );
+ assert( pCur->eState==CURSOR_VALID );
+ assert( pCur->iPage<BTCURSOR_MAX_DEPTH );
+ assert( pCur->iPage>=0 );
+ if( pCur->iPage>=(BTCURSOR_MAX_DEPTH-1) ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+ pCur->info.nSize = 0;
+ pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl);
+ pCur->iPage++;
+ pCur->aiIdx[pCur->iPage] = 0;
+ return getAndInitPage(pBt, newPgno, &pCur->apPage[pCur->iPage],
+ pCur, pCur->curPagerFlags);
+}
+
+#if SQLITE_DEBUG
+/*
+** Page pParent is an internal (non-leaf) tree page. This function
+** asserts that page number iChild is the left-child of the iIdx'th
+** cell in page pParent. Or, if iIdx is equal to the total number of
+** cells in pParent, that page number iChild is the right-child of
+** the page.
+*/
+static void assertParentIndex(MemPage *pParent, int iIdx, Pgno iChild){
+ if( CORRUPT_DB ) return; /* The conditions tested below might not be true
+ ** in a corrupt database */
+ assert( iIdx<=pParent->nCell );
+ if( iIdx==pParent->nCell ){
+ assert( get4byte(&pParent->aData[pParent->hdrOffset+8])==iChild );
+ }else{
+ assert( get4byte(findCell(pParent, iIdx))==iChild );
+ }
+}
+#else
+# define assertParentIndex(x,y,z)
+#endif
+
+/*
+** Move the cursor up to the parent page.
+**
+** pCur->idx is set to the cell index that contains the pointer
+** to the page we are coming from. If we are coming from the
+** right-most child page then pCur->idx is set to one more than
+** the largest cell index.
+*/
+static void moveToParent(BtCursor *pCur){
+ assert( cursorOwnsBtShared(pCur) );
+ assert( pCur->eState==CURSOR_VALID );
+ assert( pCur->iPage>0 );
+ assert( pCur->apPage[pCur->iPage] );
+ assertParentIndex(
+ pCur->apPage[pCur->iPage-1],
+ pCur->aiIdx[pCur->iPage-1],
+ pCur->apPage[pCur->iPage]->pgno
+ );
+ testcase( pCur->aiIdx[pCur->iPage-1] > pCur->apPage[pCur->iPage-1]->nCell );
+ pCur->info.nSize = 0;
+ pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl);
+ releasePageNotNull(pCur->apPage[pCur->iPage--]);
+}
+
+/*
+** Move the cursor to point to the root page of its b-tree structure.
+**
+** If the table has a virtual root page, then the cursor is moved to point
+** to the virtual root page instead of the actual root page. A table has a
+** virtual root page when the actual root page contains no cells and a
+** single child page. This can only happen with the table rooted at page 1.
+**
+** If the b-tree structure is empty, the cursor state is set to
+** CURSOR_INVALID. Otherwise, the cursor is set to point to the first
+** cell located on the root (or virtual root) page and the cursor state
+** is set to CURSOR_VALID.
+**
+** If this function returns successfully, it may be assumed that the
+** page-header flags indicate that the [virtual] root-page is the expected
+** kind of b-tree page (i.e. if when opening the cursor the caller did not
+** specify a KeyInfo structure the flags byte is set to 0x05 or 0x0D,
+** indicating a table b-tree, or if the caller did specify a KeyInfo
+** structure the flags byte is set to 0x02 or 0x0A, indicating an index
+** b-tree).
+*/
+static int moveToRoot(BtCursor *pCur){
+ MemPage *pRoot;
+ int rc = SQLITE_OK;
+
+ assert( cursorOwnsBtShared(pCur) );
+ assert( CURSOR_INVALID < CURSOR_REQUIRESEEK );
+ assert( CURSOR_VALID < CURSOR_REQUIRESEEK );
+ assert( CURSOR_FAULT > CURSOR_REQUIRESEEK );
+ if( pCur->eState>=CURSOR_REQUIRESEEK ){
+ if( pCur->eState==CURSOR_FAULT ){
+ assert( pCur->skipNext!=SQLITE_OK );
+ return pCur->skipNext;
+ }
+ sqlite3BtreeClearCursor(pCur);
+ }
+
+ if( pCur->iPage>=0 ){
+ if( pCur->iPage ){
+ do{
+ assert( pCur->apPage[pCur->iPage]!=0 );
+ releasePageNotNull(pCur->apPage[pCur->iPage--]);
+ }while( pCur->iPage);
+ goto skip_init;
+ }
+ }else if( pCur->pgnoRoot==0 ){
+ pCur->eState = CURSOR_INVALID;
+ return SQLITE_OK;
+ }else{
+ assert( pCur->iPage==(-1) );
+ rc = getAndInitPage(pCur->pBtree->pBt, pCur->pgnoRoot, &pCur->apPage[0],
+ 0, pCur->curPagerFlags);
+ if( rc!=SQLITE_OK ){
+ pCur->eState = CURSOR_INVALID;
+ return rc;
+ }
+ pCur->iPage = 0;
+ pCur->curIntKey = pCur->apPage[0]->intKey;
+ }
+ pRoot = pCur->apPage[0];
+ assert( pRoot->pgno==pCur->pgnoRoot );
+
+ /* If pCur->pKeyInfo is not NULL, then the caller that opened this cursor
+ ** expected to open it on an index b-tree. Otherwise, if pKeyInfo is
+ ** NULL, the caller expects a table b-tree. If this is not the case,
+ ** return an SQLITE_CORRUPT error.
+ **
+ ** Earlier versions of SQLite assumed that this test could not fail
+ ** if the root page was already loaded when this function was called (i.e.
+ ** if pCur->iPage>=0). But this is not so if the database is corrupted
+ ** in such a way that page pRoot is linked into a second b-tree table
+ ** (or the freelist). */
+ assert( pRoot->intKey==1 || pRoot->intKey==0 );
+ if( pRoot->isInit==0 || (pCur->pKeyInfo==0)!=pRoot->intKey ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+
+skip_init:
+ pCur->aiIdx[0] = 0;
+ pCur->info.nSize = 0;
+ pCur->curFlags &= ~(BTCF_AtLast|BTCF_ValidNKey|BTCF_ValidOvfl);
+
+ pRoot = pCur->apPage[0];
+ if( pRoot->nCell>0 ){
+ pCur->eState = CURSOR_VALID;
+ }else if( !pRoot->leaf ){
+ Pgno subpage;
+ if( pRoot->pgno!=1 ) return SQLITE_CORRUPT_BKPT;
+ subpage = get4byte(&pRoot->aData[pRoot->hdrOffset+8]);
+ pCur->eState = CURSOR_VALID;
+ rc = moveToChild(pCur, subpage);
+ }else{
+ pCur->eState = CURSOR_INVALID;
+ }
+ return rc;
+}
+
+/*
+** Move the cursor down to the left-most leaf entry beneath the
+** entry to which it is currently pointing.
+**
+** The left-most leaf is the one with the smallest key - the first
+** in ascending order.
+*/
+static int moveToLeftmost(BtCursor *pCur){
+ Pgno pgno;
+ int rc = SQLITE_OK;
+ MemPage *pPage;
+
+ assert( cursorOwnsBtShared(pCur) );
+ assert( pCur->eState==CURSOR_VALID );
+ while( rc==SQLITE_OK && !(pPage = pCur->apPage[pCur->iPage])->leaf ){
+ assert( pCur->aiIdx[pCur->iPage]<pPage->nCell );
+ pgno = get4byte(findCell(pPage, pCur->aiIdx[pCur->iPage]));
+ rc = moveToChild(pCur, pgno);
+ }
+ return rc;
+}
+
+/*
+** Move the cursor down to the right-most leaf entry beneath the
+** page to which it is currently pointing. Notice the difference
+** between moveToLeftmost() and moveToRightmost(). moveToLeftmost()
+** finds the left-most entry beneath the *entry* whereas moveToRightmost()
+** finds the right-most entry beneath the *page*.
+**
+** The right-most entry is the one with the largest key - the last
+** key in ascending order.
+*/
+static int moveToRightmost(BtCursor *pCur){
+ Pgno pgno;
+ int rc = SQLITE_OK;
+ MemPage *pPage = 0;
+
+ assert( cursorOwnsBtShared(pCur) );
+ assert( pCur->eState==CURSOR_VALID );
+ while( !(pPage = pCur->apPage[pCur->iPage])->leaf ){
+ pgno = get4byte(&pPage->aData[pPage->hdrOffset+8]);
+ pCur->aiIdx[pCur->iPage] = pPage->nCell;
+ rc = moveToChild(pCur, pgno);
+ if( rc ) return rc;
+ }
+ pCur->aiIdx[pCur->iPage] = pPage->nCell-1;
+ assert( pCur->info.nSize==0 );
+ assert( (pCur->curFlags & BTCF_ValidNKey)==0 );
+ return SQLITE_OK;
+}
+
+/* Move the cursor to the first entry in the table. Return SQLITE_OK
+** on success. Set *pRes to 0 if the cursor actually points to something
+** or set *pRes to 1 if the table is empty.
+*/
+SQLITE_PRIVATE int sqlite3BtreeFirst(BtCursor *pCur, int *pRes){
+ int rc;
+
+ assert( cursorOwnsBtShared(pCur) );
+ assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) );
+ rc = moveToRoot(pCur);
+ if( rc==SQLITE_OK ){
+ if( pCur->eState==CURSOR_INVALID ){
+ assert( pCur->pgnoRoot==0 || pCur->apPage[pCur->iPage]->nCell==0 );
+ *pRes = 1;
+ }else{
+ assert( pCur->apPage[pCur->iPage]->nCell>0 );
+ *pRes = 0;
+ rc = moveToLeftmost(pCur);
+ }
+ }
+ return rc;
+}
+
+/* Move the cursor to the last entry in the table. Return SQLITE_OK
+** on success. Set *pRes to 0 if the cursor actually points to something
+** or set *pRes to 1 if the table is empty.
+*/
+SQLITE_PRIVATE int sqlite3BtreeLast(BtCursor *pCur, int *pRes){
+ int rc;
+
+ assert( cursorOwnsBtShared(pCur) );
+ assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) );
+
+ /* If the cursor already points to the last entry, this is a no-op. */
+ if( CURSOR_VALID==pCur->eState && (pCur->curFlags & BTCF_AtLast)!=0 ){
+#ifdef SQLITE_DEBUG
+ /* This block serves to assert() that the cursor really does point
+ ** to the last entry in the b-tree. */
+ int ii;
+ for(ii=0; ii<pCur->iPage; ii++){
+ assert( pCur->aiIdx[ii]==pCur->apPage[ii]->nCell );
+ }
+ assert( pCur->aiIdx[pCur->iPage]==pCur->apPage[pCur->iPage]->nCell-1 );
+ assert( pCur->apPage[pCur->iPage]->leaf );
+#endif
+ return SQLITE_OK;
+ }
+
+ rc = moveToRoot(pCur);
+ if( rc==SQLITE_OK ){
+ if( CURSOR_INVALID==pCur->eState ){
+ assert( pCur->pgnoRoot==0 || pCur->apPage[pCur->iPage]->nCell==0 );
+ *pRes = 1;
+ }else{
+ assert( pCur->eState==CURSOR_VALID );
+ *pRes = 0;
+ rc = moveToRightmost(pCur);
+ if( rc==SQLITE_OK ){
+ pCur->curFlags |= BTCF_AtLast;
+ }else{
+ pCur->curFlags &= ~BTCF_AtLast;
+ }
+
+ }
+ }
+ return rc;
+}
+
+/* Move the cursor so that it points to an entry near the key
+** specified by pIdxKey or intKey. Return a success code.
+**
+** For INTKEY tables, the intKey parameter is used. pIdxKey
+** must be NULL. For index tables, pIdxKey is used and intKey
+** is ignored.
+**
+** If an exact match is not found, then the cursor is always
+** left pointing at a leaf page which would hold the entry if it
+** were present. The cursor might point to an entry that comes
+** before or after the key.
+**
+** An integer is written into *pRes which is the result of
+** comparing the key with the entry to which the cursor is
+** pointing. The meaning of the integer written into
+** *pRes is as follows:
+**
+** *pRes<0 The cursor is left pointing at an entry that
+** is smaller than intKey/pIdxKey or if the table is empty
+**                  and the cursor is therefore left pointing to nothing.
+**
+** *pRes==0 The cursor is left pointing at an entry that
+** exactly matches intKey/pIdxKey.
+**
+** *pRes>0 The cursor is left pointing at an entry that
+** is larger than intKey/pIdxKey.
+**
+** For index tables, the pIdxKey->eqSeen field is set to 1 if there
+** exists an entry in the table that exactly matches pIdxKey.
+*/
+SQLITE_PRIVATE int sqlite3BtreeMovetoUnpacked(
+ BtCursor *pCur, /* The cursor to be moved */
+ UnpackedRecord *pIdxKey, /* Unpacked index key */
+ i64 intKey, /* The table key */
+ int biasRight, /* If true, bias the search to the high end */
+ int *pRes /* Write search results here */
+){
+ int rc;
+ RecordCompare xRecordCompare;
+
+ assert( cursorOwnsBtShared(pCur) );
+ assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) );
+ assert( pRes );
+ assert( (pIdxKey==0)==(pCur->pKeyInfo==0) );
+ assert( pCur->eState!=CURSOR_VALID || (pIdxKey==0)==(pCur->curIntKey!=0) );
+
+ /* If the cursor is already positioned at the point we are trying
+ ** to move to, then just return without doing any work */
+ if( pIdxKey==0
+ && pCur->eState==CURSOR_VALID && (pCur->curFlags & BTCF_ValidNKey)!=0
+ ){
+ if( pCur->info.nKey==intKey ){
+ *pRes = 0;
+ return SQLITE_OK;
+ }
+ if( pCur->info.nKey<intKey ){
+ if( (pCur->curFlags & BTCF_AtLast)!=0 ){
+ *pRes = -1;
+ return SQLITE_OK;
+ }
+ /* If the requested key is one more than the previous key, then
+ ** try to get there using sqlite3BtreeNext() rather than a full
+ ** binary search. This is an optimization only. The correct answer
+ ** is still obtained without this case, only a little more slowly. */
+ if( pCur->info.nKey+1==intKey && !pCur->skipNext ){
+ *pRes = 0;
+ rc = sqlite3BtreeNext(pCur, pRes);
+ if( rc ) return rc;
+ if( *pRes==0 ){
+ getCellInfo(pCur);
+ if( pCur->info.nKey==intKey ){
+ return SQLITE_OK;
+ }
+ }
+ }
+ }
+ }
+
+ if( pIdxKey ){
+ xRecordCompare = sqlite3VdbeFindCompare(pIdxKey);
+ pIdxKey->errCode = 0;
+ assert( pIdxKey->default_rc==1
+ || pIdxKey->default_rc==0
+ || pIdxKey->default_rc==-1
+ );
+ }else{
+ xRecordCompare = 0; /* All keys are integers */
+ }
+
+ rc = moveToRoot(pCur);
+ if( rc ){
+ return rc;
+ }
+ assert( pCur->pgnoRoot==0 || pCur->apPage[pCur->iPage] );
+ assert( pCur->pgnoRoot==0 || pCur->apPage[pCur->iPage]->isInit );
+ assert( pCur->eState==CURSOR_INVALID || pCur->apPage[pCur->iPage]->nCell>0 );
+ if( pCur->eState==CURSOR_INVALID ){
+ *pRes = -1;
+ assert( pCur->pgnoRoot==0 || pCur->apPage[pCur->iPage]->nCell==0 );
+ return SQLITE_OK;
+ }
+ assert( pCur->apPage[0]->intKey==pCur->curIntKey );
+ assert( pCur->curIntKey || pIdxKey );
+ for(;;){
+ int lwr, upr, idx, c;
+ Pgno chldPg;
+ MemPage *pPage = pCur->apPage[pCur->iPage];
+ u8 *pCell; /* Pointer to current cell in pPage */
+
+ /* pPage->nCell must be greater than zero. If this is the root-page
+ ** the cursor would have been INVALID above and this for(;;) loop
+ ** would not have run. If this is not the root-page, then the moveToChild()
+ ** routine would have already detected db corruption. Similarly, pPage must
+ ** be the right kind (index or table) of b-tree page. Otherwise
+ ** a moveToChild() or moveToRoot() call would have detected corruption. */
+ assert( pPage->nCell>0 );
+ assert( pPage->intKey==(pIdxKey==0) );
+ lwr = 0;
+ upr = pPage->nCell-1;
+ assert( biasRight==0 || biasRight==1 );
+ idx = upr>>(1-biasRight); /* idx = biasRight ? upr : (lwr+upr)/2; */
+ pCur->aiIdx[pCur->iPage] = (u16)idx;
+ if( xRecordCompare==0 ){
+ for(;;){
+ i64 nCellKey;
+ pCell = findCellPastPtr(pPage, idx);
+ if( pPage->intKeyLeaf ){
+ while( 0x80 <= *(pCell++) ){
+ if( pCell>=pPage->aDataEnd ) return SQLITE_CORRUPT_BKPT;
+ }
+ }
+ getVarint(pCell, (u64*)&nCellKey);
+ if( nCellKey<intKey ){
+ lwr = idx+1;
+ if( lwr>upr ){ c = -1; break; }
+ }else if( nCellKey>intKey ){
+ upr = idx-1;
+ if( lwr>upr ){ c = +1; break; }
+ }else{
+ assert( nCellKey==intKey );
+ pCur->aiIdx[pCur->iPage] = (u16)idx;
+ if( !pPage->leaf ){
+ lwr = idx;
+ goto moveto_next_layer;
+ }else{
+ pCur->curFlags |= BTCF_ValidNKey;
+ pCur->info.nKey = nCellKey;
+ pCur->info.nSize = 0;
+ *pRes = 0;
+ return SQLITE_OK;
+ }
+ }
+ assert( lwr+upr>=0 );
+ idx = (lwr+upr)>>1; /* idx = (lwr+upr)/2; */
+ }
+ }else{
+ for(;;){
+ int nCell; /* Size of the pCell cell in bytes */
+ pCell = findCellPastPtr(pPage, idx);
+
+ /* The maximum supported page-size is 65536 bytes. This means that
+ ** the maximum number of record bytes stored on an index B-Tree
+ ** page is less than 16384 bytes and may be stored as a 2-byte
+ ** varint. This information is used to attempt to avoid parsing
+ ** the entire cell by checking for the cases where the record is
+ ** stored entirely within the b-tree page by inspecting the first
+ ** 2 bytes of the cell.
+ */
+ nCell = pCell[0];
+ if( nCell<=pPage->max1bytePayload ){
+ /* This branch runs if the record-size field of the cell is a
+ ** single byte varint and the record fits entirely on the main
+ ** b-tree page. */
+ testcase( pCell+nCell+1==pPage->aDataEnd );
+ c = xRecordCompare(nCell, (void*)&pCell[1], pIdxKey);
+ }else if( !(pCell[1] & 0x80)
+ && (nCell = ((nCell&0x7f)<<7) + pCell[1])<=pPage->maxLocal
+ ){
+ /* The record-size field is a 2 byte varint and the record
+ ** fits entirely on the main b-tree page. */
+ testcase( pCell+nCell+2==pPage->aDataEnd );
+ c = xRecordCompare(nCell, (void*)&pCell[2], pIdxKey);
+ }else{
+ /* The record flows over onto one or more overflow pages. In
+ ** this case the whole cell needs to be parsed, a buffer allocated
+ ** and accessPayload() used to retrieve the record into the
+ ** buffer before VdbeRecordCompare() can be called.
+ **
+ ** If the record is corrupt, the xRecordCompare routine may read
+ ** up to two varints past the end of the buffer. An extra 18
+ ** bytes of padding is allocated at the end of the buffer in
+ ** case this happens. */
+ void *pCellKey;
+ u8 * const pCellBody = pCell - pPage->childPtrSize;
+ pPage->xParseCell(pPage, pCellBody, &pCur->info);
+ nCell = (int)pCur->info.nKey;
+ testcase( nCell<0 ); /* True if key size is 2^32 or more */
+ testcase( nCell==0 ); /* Invalid key size: 0x80 0x80 0x00 */
+ testcase( nCell==1 ); /* Invalid key size: 0x80 0x80 0x01 */
+ testcase( nCell==2 ); /* Minimum legal index key size */
+ if( nCell<2 ){
+ rc = SQLITE_CORRUPT_BKPT;
+ goto moveto_finish;
+ }
+ pCellKey = sqlite3Malloc( nCell+18 );
+ if( pCellKey==0 ){
+ rc = SQLITE_NOMEM_BKPT;
+ goto moveto_finish;
+ }
+ pCur->aiIdx[pCur->iPage] = (u16)idx;
+ rc = accessPayload(pCur, 0, nCell, (unsigned char*)pCellKey, 0);
+ pCur->curFlags &= ~BTCF_ValidOvfl;
+ if( rc ){
+ sqlite3_free(pCellKey);
+ goto moveto_finish;
+ }
+ c = xRecordCompare(nCell, pCellKey, pIdxKey);
+ sqlite3_free(pCellKey);
+ }
+ assert(
+ (pIdxKey->errCode!=SQLITE_CORRUPT || c==0)
+ && (pIdxKey->errCode!=SQLITE_NOMEM || pCur->pBtree->db->mallocFailed)
+ );
+ if( c<0 ){
+ lwr = idx+1;
+ }else if( c>0 ){
+ upr = idx-1;
+ }else{
+ assert( c==0 );
+ *pRes = 0;
+ rc = SQLITE_OK;
+ pCur->aiIdx[pCur->iPage] = (u16)idx;
+ if( pIdxKey->errCode ) rc = SQLITE_CORRUPT;
+ goto moveto_finish;
+ }
+ if( lwr>upr ) break;
+ assert( lwr+upr>=0 );
+ idx = (lwr+upr)>>1; /* idx = (lwr+upr)/2 */
+ }
+ }
+ assert( lwr==upr+1 || (pPage->intKey && !pPage->leaf) );
+ assert( pPage->isInit );
+ if( pPage->leaf ){
+ assert( pCur->aiIdx[pCur->iPage]<pCur->apPage[pCur->iPage]->nCell );
+ pCur->aiIdx[pCur->iPage] = (u16)idx;
+ *pRes = c;
+ rc = SQLITE_OK;
+ goto moveto_finish;
+ }
+moveto_next_layer:
+ if( lwr>=pPage->nCell ){
+ chldPg = get4byte(&pPage->aData[pPage->hdrOffset+8]);
+ }else{
+ chldPg = get4byte(findCell(pPage, lwr));
+ }
+ pCur->aiIdx[pCur->iPage] = (u16)lwr;
+ rc = moveToChild(pCur, chldPg);
+ if( rc ) break;
+ }
+moveto_finish:
+ pCur->info.nSize = 0;
+ assert( (pCur->curFlags & BTCF_ValidOvfl)==0 );
+ return rc;
+}
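+
+/*
+** Illustrative sketch only -- not part of SQLite and compiled out with
+** "#if 0", following the convention of the debug-only blocks in this file.
+** It restates the lwr/upr/idx convergence used by the integer-key loop in
+** sqlite3BtreeMovetoUnpacked() above on a plain sorted array, including the
+** biasRight seeding of the first probe. The helper name demoRowidSearch is
+** hypothetical. Returns the index of an exact match, or -1 if not found.
+*/
+#if 0
+static int demoRowidSearch(
+  const i64 *aKey,      /* Sorted array of integer keys */
+  int nKey,             /* Number of entries in aKey[] */
+  i64 intKey,           /* Key to search for */
+  int biasRight         /* If true, bias the first probe to the high end */
+){
+  int lwr, upr, idx;
+  if( nKey<=0 ) return -1;
+  lwr = 0;
+  upr = nKey - 1;
+  idx = upr >> (1 - biasRight);     /* idx = biasRight ? upr : (lwr+upr)/2 */
+  while( lwr<=upr ){
+    if( aKey[idx]<intKey ){
+      lwr = idx + 1;                /* Probe was too small */
+    }else if( aKey[idx]>intKey ){
+      upr = idx - 1;                /* Probe was too large */
+    }else{
+      return idx;                   /* Exact match */
+    }
+    idx = (lwr + upr) >> 1;         /* idx = (lwr+upr)/2 */
+  }
+  return -1;                        /* Key not present */
+}
+#endif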
+
+
+/*
+** Return TRUE if the cursor is not pointing at an entry of the table.
+**
+** TRUE will be returned after a call to sqlite3BtreeNext() moves
+** past the last entry in the table or sqlite3BtreePrev() moves past
+** the first entry. TRUE is also returned if the table is empty.
+*/
+SQLITE_PRIVATE int sqlite3BtreeEof(BtCursor *pCur){
+ /* TODO: What if the cursor is in CURSOR_REQUIRESEEK but all table entries
+ ** have been deleted? This API will need to change to return an error code
+ ** as well as the boolean result value.
+ */
+ return (CURSOR_VALID!=pCur->eState);
+}
+
+/*
+** Advance the cursor to the next entry in the database. If
+** successful then set *pRes=0. If the cursor
+** was already pointing to the last entry in the database before
+** this routine was called, then set *pRes=1.
+**
+** The main entry point is sqlite3BtreeNext(). That routine is optimized
+** for the common case of merely incrementing the cell counter BtCursor.aiIdx
+** to the next cell on the current page. The (slower) btreeNext() helper
+** routine is called when it is necessary to move to a different page or
+** to restore the cursor.
+**
+** The calling function will set *pRes to 0 or 1. The initial *pRes value
+** will be 1 if the cursor being stepped corresponds to an SQL index and
+** if this routine could have been skipped if that SQL index had been
+** a unique index. Otherwise the caller will have set *pRes to zero.
+** Zero is the common case. The btree implementation is free to use the
+** initial *pRes value as a hint to improve performance, but the current
+** SQLite btree implementation does not. (Note that the comdb2 btree
+** implementation does use this hint, however.)
+*/
+static SQLITE_NOINLINE int btreeNext(BtCursor *pCur, int *pRes){
+ int rc;
+ int idx;
+ MemPage *pPage;
+
+ assert( cursorOwnsBtShared(pCur) );
+ assert( pCur->skipNext==0 || pCur->eState!=CURSOR_VALID );
+ assert( *pRes==0 );
+ if( pCur->eState!=CURSOR_VALID ){
+ assert( (pCur->curFlags & BTCF_ValidOvfl)==0 );
+ rc = restoreCursorPosition(pCur);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ if( CURSOR_INVALID==pCur->eState ){
+ *pRes = 1;
+ return SQLITE_OK;
+ }
+ if( pCur->skipNext ){
+ assert( pCur->eState==CURSOR_VALID || pCur->eState==CURSOR_SKIPNEXT );
+ pCur->eState = CURSOR_VALID;
+ if( pCur->skipNext>0 ){
+ pCur->skipNext = 0;
+ return SQLITE_OK;
+ }
+ pCur->skipNext = 0;
+ }
+ }
+
+ pPage = pCur->apPage[pCur->iPage];
+ idx = ++pCur->aiIdx[pCur->iPage];
+ assert( pPage->isInit );
+
+ /* If the database file is corrupt, it is possible for the value of idx
+ ** to be invalid here. This can only occur if a second cursor modifies
+ ** the page while cursor pCur is holding a reference to it, which can
+ ** only happen if the database is corrupt in such a way as to link the
+ ** page into more than one b-tree structure. */
+ testcase( idx>pPage->nCell );
+
+ if( idx>=pPage->nCell ){
+ if( !pPage->leaf ){
+ rc = moveToChild(pCur, get4byte(&pPage->aData[pPage->hdrOffset+8]));
+ if( rc ) return rc;
+ return moveToLeftmost(pCur);
+ }
+ do{
+ if( pCur->iPage==0 ){
+ *pRes = 1;
+ pCur->eState = CURSOR_INVALID;
+ return SQLITE_OK;
+ }
+ moveToParent(pCur);
+ pPage = pCur->apPage[pCur->iPage];
+ }while( pCur->aiIdx[pCur->iPage]>=pPage->nCell );
+ if( pPage->intKey ){
+ return sqlite3BtreeNext(pCur, pRes);
+ }else{
+ return SQLITE_OK;
+ }
+ }
+ if( pPage->leaf ){
+ return SQLITE_OK;
+ }else{
+ return moveToLeftmost(pCur);
+ }
+}
+SQLITE_PRIVATE int sqlite3BtreeNext(BtCursor *pCur, int *pRes){
+ MemPage *pPage;
+ assert( cursorOwnsBtShared(pCur) );
+ assert( pRes!=0 );
+ assert( *pRes==0 || *pRes==1 );
+ assert( pCur->skipNext==0 || pCur->eState!=CURSOR_VALID );
+ pCur->info.nSize = 0;
+ pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl);
+ *pRes = 0;
+ if( pCur->eState!=CURSOR_VALID ) return btreeNext(pCur, pRes);
+ pPage = pCur->apPage[pCur->iPage];
+ if( (++pCur->aiIdx[pCur->iPage])>=pPage->nCell ){
+ pCur->aiIdx[pCur->iPage]--;
+ return btreeNext(pCur, pRes);
+ }
+ if( pPage->leaf ){
+ return SQLITE_OK;
+ }else{
+ return moveToLeftmost(pCur);
+ }
+}
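+
+/*
+** Illustrative sketch only -- not part of SQLite and compiled out with
+** "#if 0". It shows how a cursor is typically walked over an entire table
+** using sqlite3BtreeFirst() together with sqlite3BtreeNext() above:
+** *pRes==1 means the table was empty or the cursor has stepped past the
+** last entry. The helper name demoScanTable is hypothetical and the error
+** handling is reduced to returning the first non-OK code.
+*/
+#if 0
+static int demoScanTable(BtCursor *pCur, int *pnRow){
+  int rc;
+  int res = 0;             /* 0: cursor points at an entry. 1: past the end */
+  *pnRow = 0;
+  rc = sqlite3BtreeFirst(pCur, &res);
+  while( rc==SQLITE_OK && res==0 ){
+    (*pnRow)++;            /* Visit the current entry here */
+    rc = sqlite3BtreeNext(pCur, &res);
+  }
+  return rc;
+}
+#endif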
+
+/*
+** Step the cursor back to the previous entry in the database. If
+** successful then set *pRes=0. If the cursor
+** was already pointing to the first entry in the database before
+** this routine was called, then set *pRes=1.
+**
+** The main entry point is sqlite3BtreePrevious(). That routine is optimized
+** for the common case of merely decrementing the cell counter BtCursor.aiIdx
+** to the previous cell on the current page. The (slower) btreePrevious()
+** helper routine is called when it is necessary to move to a different page
+** or to restore the cursor.
+**
+** The calling function will set *pRes to 0 or 1. The initial *pRes value
+** will be 1 if the cursor being stepped corresponds to an SQL index and
+** if this routine could have been skipped if that SQL index had been
+** a unique index. Otherwise the caller will have set *pRes to zero.
+** Zero is the common case. The btree implementation is free to use the
+** initial *pRes value as a hint to improve performance, but the current
+** SQLite btree implementation does not. (Note that the comdb2 btree
+** implementation does use this hint, however.)
+*/
+static SQLITE_NOINLINE int btreePrevious(BtCursor *pCur, int *pRes){
+ int rc;
+ MemPage *pPage;
+
+ assert( cursorOwnsBtShared(pCur) );
+ assert( pRes!=0 );
+ assert( *pRes==0 );
+ assert( pCur->skipNext==0 || pCur->eState!=CURSOR_VALID );
+ assert( (pCur->curFlags & (BTCF_AtLast|BTCF_ValidOvfl|BTCF_ValidNKey))==0 );
+ assert( pCur->info.nSize==0 );
+ if( pCur->eState!=CURSOR_VALID ){
+ rc = restoreCursorPosition(pCur);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ if( CURSOR_INVALID==pCur->eState ){
+ *pRes = 1;
+ return SQLITE_OK;
+ }
+ if( pCur->skipNext ){
+ assert( pCur->eState==CURSOR_VALID || pCur->eState==CURSOR_SKIPNEXT );
+ pCur->eState = CURSOR_VALID;
+ if( pCur->skipNext<0 ){
+ pCur->skipNext = 0;
+ return SQLITE_OK;
+ }
+ pCur->skipNext = 0;
+ }
+ }
+
+ pPage = pCur->apPage[pCur->iPage];
+ assert( pPage->isInit );
+ if( !pPage->leaf ){
+ int idx = pCur->aiIdx[pCur->iPage];
+ rc = moveToChild(pCur, get4byte(findCell(pPage, idx)));
+ if( rc ) return rc;
+ rc = moveToRightmost(pCur);
+ }else{
+ while( pCur->aiIdx[pCur->iPage]==0 ){
+ if( pCur->iPage==0 ){
+ pCur->eState = CURSOR_INVALID;
+ *pRes = 1;
+ return SQLITE_OK;
+ }
+ moveToParent(pCur);
+ }
+ assert( pCur->info.nSize==0 );
+ assert( (pCur->curFlags & (BTCF_ValidOvfl))==0 );
+
+ pCur->aiIdx[pCur->iPage]--;
+ pPage = pCur->apPage[pCur->iPage];
+ if( pPage->intKey && !pPage->leaf ){
+ rc = sqlite3BtreePrevious(pCur, pRes);
+ }else{
+ rc = SQLITE_OK;
+ }
+ }
+ return rc;
+}
+SQLITE_PRIVATE int sqlite3BtreePrevious(BtCursor *pCur, int *pRes){
+ assert( cursorOwnsBtShared(pCur) );
+ assert( pRes!=0 );
+ assert( *pRes==0 || *pRes==1 );
+ assert( pCur->skipNext==0 || pCur->eState!=CURSOR_VALID );
+ *pRes = 0;
+ pCur->curFlags &= ~(BTCF_AtLast|BTCF_ValidOvfl|BTCF_ValidNKey);
+ pCur->info.nSize = 0;
+ if( pCur->eState!=CURSOR_VALID
+ || pCur->aiIdx[pCur->iPage]==0
+ || pCur->apPage[pCur->iPage]->leaf==0
+ ){
+ return btreePrevious(pCur, pRes);
+ }
+ pCur->aiIdx[pCur->iPage]--;
+ return SQLITE_OK;
+}
+
+/*
+** Allocate a new page from the database file.
+**
+** The new page is marked as dirty. (In other words, sqlite3PagerWrite()
+** has already been called on the new page.) The new page has also
+** been referenced and the calling routine is responsible for calling
+** sqlite3PagerUnref() on the new page when it is done.
+**
+** SQLITE_OK is returned on success. Any other return value indicates
+** an error. *ppPage is set to NULL in the event of an error.
+**
+** If the "nearby" parameter is not 0, then an effort is made to
+** locate a page close to the page number "nearby". This can be used in an
+** attempt to keep related pages close to each other in the database file,
+** which in turn can make database access faster.
+**
+** If the eMode parameter is BTALLOC_EXACT and the nearby page exists
+** anywhere on the free-list, then it is guaranteed to be returned. If
+** eMode is BTALLOC_LE then the page returned will be less than or equal
+** to nearby if any such page exists. If eMode is BTALLOC_ANY then there
+** are no restrictions on which page is returned.
+*/
+static int allocateBtreePage(
+ BtShared *pBt, /* The btree */
+ MemPage **ppPage, /* Store pointer to the allocated page here */
+ Pgno *pPgno, /* Store the page number here */
+ Pgno nearby, /* Search for a page near this one */
+ u8 eMode /* BTALLOC_EXACT, BTALLOC_LE, or BTALLOC_ANY */
+){
+ MemPage *pPage1;
+ int rc;
+ u32 n; /* Number of pages on the freelist */
+ u32 k; /* Number of leaves on the trunk of the freelist */
+ MemPage *pTrunk = 0;
+ MemPage *pPrevTrunk = 0;
+ Pgno mxPage; /* Total size of the database file */
+
+ assert( sqlite3_mutex_held(pBt->mutex) );
+ assert( eMode==BTALLOC_ANY || (nearby>0 && IfNotOmitAV(pBt->autoVacuum)) );
+ pPage1 = pBt->pPage1;
+ mxPage = btreePagecount(pBt);
+ /* EVIDENCE-OF: R-05119-02637 The 4-byte big-endian integer at offset 36
+ ** stores the total number of pages on the freelist. */
+ n = get4byte(&pPage1->aData[36]);
+ testcase( n==mxPage-1 );
+ if( n>=mxPage ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+ if( n>0 ){
+ /* There are pages on the freelist. Reuse one of those pages. */
+ Pgno iTrunk;
+ u8 searchList = 0; /* If the free-list must be searched for 'nearby' */
+ u32 nSearch = 0; /* Count of the number of search attempts */
+
+ /* If eMode==BTALLOC_EXACT and a query of the pointer-map
+ ** shows that the page 'nearby' is somewhere on the free-list, then
+ ** the entire free-list will be searched for that page.
+ */
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ if( eMode==BTALLOC_EXACT ){
+ if( nearby<=mxPage ){
+ u8 eType;
+ assert( nearby>0 );
+ assert( pBt->autoVacuum );
+ rc = ptrmapGet(pBt, nearby, &eType, 0);
+ if( rc ) return rc;
+ if( eType==PTRMAP_FREEPAGE ){
+ searchList = 1;
+ }
+ }
+ }else if( eMode==BTALLOC_LE ){
+ searchList = 1;
+ }
+#endif
+
+ /* Decrement the free-list count by 1. Set iTrunk to the index of the
+ ** first free-list trunk page. pPrevTrunk is initially NULL.
+ */
+ rc = sqlite3PagerWrite(pPage1->pDbPage);
+ if( rc ) return rc;
+ put4byte(&pPage1->aData[36], n-1);
+
+ /* The code within this loop is run only once if the 'searchList' variable
+ ** is not true. Otherwise, it runs once for each trunk-page on the
+ ** free-list until the page 'nearby' is located (eMode==BTALLOC_EXACT)
+ ** or until a page less than or equal to 'nearby' is located (eMode==BTALLOC_LE).
+ */
+ do {
+ pPrevTrunk = pTrunk;
+ if( pPrevTrunk ){
+ /* EVIDENCE-OF: R-01506-11053 The first integer on a freelist trunk page
+ ** is the page number of the next freelist trunk page in the list or
+ ** zero if this is the last freelist trunk page. */
+ iTrunk = get4byte(&pPrevTrunk->aData[0]);
+ }else{
+ /* EVIDENCE-OF: R-59841-13798 The 4-byte big-endian integer at offset 32
+ ** stores the page number of the first page of the freelist, or zero if
+ ** the freelist is empty. */
+ iTrunk = get4byte(&pPage1->aData[32]);
+ }
+ testcase( iTrunk==mxPage );
+ if( iTrunk>mxPage || nSearch++ > n ){
+ rc = SQLITE_CORRUPT_BKPT;
+ }else{
+ rc = btreeGetUnusedPage(pBt, iTrunk, &pTrunk, 0);
+ }
+ if( rc ){
+ pTrunk = 0;
+ goto end_allocate_page;
+ }
+ assert( pTrunk!=0 );
+ assert( pTrunk->aData!=0 );
+ /* EVIDENCE-OF: R-13523-04394 The second integer on a freelist trunk page
+ ** is the number of leaf page pointers to follow. */
+ k = get4byte(&pTrunk->aData[4]);
+ if( k==0 && !searchList ){
+ /* The trunk has no leaves and the list is not being searched.
+ ** So extract the trunk page itself and use it as the newly
+ ** allocated page */
+ assert( pPrevTrunk==0 );
+ rc = sqlite3PagerWrite(pTrunk->pDbPage);
+ if( rc ){
+ goto end_allocate_page;
+ }
+ *pPgno = iTrunk;
+ memcpy(&pPage1->aData[32], &pTrunk->aData[0], 4);
+ *ppPage = pTrunk;
+ pTrunk = 0;
+ TRACE(("ALLOCATE: %d trunk - %d free pages left\n", *pPgno, n-1));
+ }else if( k>(u32)(pBt->usableSize/4 - 2) ){
+ /* Value of k is out of range. Database corruption */
+ rc = SQLITE_CORRUPT_BKPT;
+ goto end_allocate_page;
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ }else if( searchList
+ && (nearby==iTrunk || (iTrunk<nearby && eMode==BTALLOC_LE))
+ ){
+ /* The list is being searched and this trunk page is the page
+ ** to allocate, regardless of whether it has leaves.
+ */
+ *pPgno = iTrunk;
+ *ppPage = pTrunk;
+ searchList = 0;
+ rc = sqlite3PagerWrite(pTrunk->pDbPage);
+ if( rc ){
+ goto end_allocate_page;
+ }
+ if( k==0 ){
+ if( !pPrevTrunk ){
+ memcpy(&pPage1->aData[32], &pTrunk->aData[0], 4);
+ }else{
+ rc = sqlite3PagerWrite(pPrevTrunk->pDbPage);
+ if( rc!=SQLITE_OK ){
+ goto end_allocate_page;
+ }
+ memcpy(&pPrevTrunk->aData[0], &pTrunk->aData[0], 4);
+ }
+ }else{
+ /* The trunk page is required by the caller but it contains
+ ** pointers to free-list leaves. The first leaf becomes a trunk
+ ** page in this case.
+ */
+ MemPage *pNewTrunk;
+ Pgno iNewTrunk = get4byte(&pTrunk->aData[8]);
+ if( iNewTrunk>mxPage ){
+ rc = SQLITE_CORRUPT_BKPT;
+ goto end_allocate_page;
+ }
+ testcase( iNewTrunk==mxPage );
+ rc = btreeGetUnusedPage(pBt, iNewTrunk, &pNewTrunk, 0);
+ if( rc!=SQLITE_OK ){
+ goto end_allocate_page;
+ }
+ rc = sqlite3PagerWrite(pNewTrunk->pDbPage);
+ if( rc!=SQLITE_OK ){
+ releasePage(pNewTrunk);
+ goto end_allocate_page;
+ }
+ memcpy(&pNewTrunk->aData[0], &pTrunk->aData[0], 4);
+ put4byte(&pNewTrunk->aData[4], k-1);
+ memcpy(&pNewTrunk->aData[8], &pTrunk->aData[12], (k-1)*4);
+ releasePage(pNewTrunk);
+ if( !pPrevTrunk ){
+ assert( sqlite3PagerIswriteable(pPage1->pDbPage) );
+ put4byte(&pPage1->aData[32], iNewTrunk);
+ }else{
+ rc = sqlite3PagerWrite(pPrevTrunk->pDbPage);
+ if( rc ){
+ goto end_allocate_page;
+ }
+ put4byte(&pPrevTrunk->aData[0], iNewTrunk);
+ }
+ }
+ pTrunk = 0;
+ TRACE(("ALLOCATE: %d trunk - %d free pages left\n", *pPgno, n-1));
+#endif
+ }else if( k>0 ){
+ /* Extract a leaf from the trunk */
+ u32 closest;
+ Pgno iPage;
+ unsigned char *aData = pTrunk->aData;
+ if( nearby>0 ){
+ u32 i;
+ closest = 0;
+ if( eMode==BTALLOC_LE ){
+ for(i=0; i<k; i++){
+ iPage = get4byte(&aData[8+i*4]);
+ if( iPage<=nearby ){
+ closest = i;
+ break;
+ }
+ }
+ }else{
+ int dist;
+ dist = sqlite3AbsInt32(get4byte(&aData[8]) - nearby);
+ for(i=1; i<k; i++){
+ int d2 = sqlite3AbsInt32(get4byte(&aData[8+i*4]) - nearby);
+ if( d2<dist ){
+ closest = i;
+ dist = d2;
+ }
+ }
+ }
+ }else{
+ closest = 0;
+ }
+
+ iPage = get4byte(&aData[8+closest*4]);
+ testcase( iPage==mxPage );
+ if( iPage>mxPage ){
+ rc = SQLITE_CORRUPT_BKPT;
+ goto end_allocate_page;
+ }
+ testcase( iPage==mxPage );
+ if( !searchList
+ || (iPage==nearby || (iPage<nearby && eMode==BTALLOC_LE))
+ ){
+ int noContent;
+ *pPgno = iPage;
+ TRACE(("ALLOCATE: %d was leaf %d of %d on trunk %d"
+ ": %d more free pages\n",
+ *pPgno, closest+1, k, pTrunk->pgno, n-1));
+ rc = sqlite3PagerWrite(pTrunk->pDbPage);
+ if( rc ) goto end_allocate_page;
+ if( closest<k-1 ){
+ memcpy(&aData[8+closest*4], &aData[4+k*4], 4);
+ }
+ put4byte(&aData[4], k-1);
+ noContent = !btreeGetHasContent(pBt, *pPgno)? PAGER_GET_NOCONTENT : 0;
+ rc = btreeGetUnusedPage(pBt, *pPgno, ppPage, noContent);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3PagerWrite((*ppPage)->pDbPage);
+ if( rc!=SQLITE_OK ){
+ releasePage(*ppPage);
+ *ppPage = 0;
+ }
+ }
+ searchList = 0;
+ }
+ }
+ releasePage(pPrevTrunk);
+ pPrevTrunk = 0;
+ }while( searchList );
+ }else{
+ /* There are no pages on the freelist, so append a new page to the
+ ** database image.
+ **
+ ** Normally, new pages allocated by this block can be requested from the
+ ** pager layer with the 'no-content' flag set. This prevents the pager
+ ** from trying to read the page content from disk. However, if the
+ ** current transaction has already run one or more incremental-vacuum
+ ** steps, then the page we are about to allocate may contain content
+ ** that is required in the event of a rollback. In this case, do
+ ** not set the no-content flag. This causes the pager to load and journal
+ ** the current page content before overwriting it.
+ **
+ ** Note that the pager will not actually attempt to load or journal
+ ** content for any page that really does lie past the end of the database
+ ** file on disk. So the effects of disabling the no-content optimization
+ ** here are confined to those pages that lie between the end of the
+ ** database image and the end of the database file.
+ */
+ int bNoContent = (0==IfNotOmitAV(pBt->bDoTruncate))? PAGER_GET_NOCONTENT:0;
+
+ rc = sqlite3PagerWrite(pBt->pPage1->pDbPage);
+ if( rc ) return rc;
+ pBt->nPage++;
+ if( pBt->nPage==PENDING_BYTE_PAGE(pBt) ) pBt->nPage++;
+
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ if( pBt->autoVacuum && PTRMAP_ISPAGE(pBt, pBt->nPage) ){
+ /* If *pPgno refers to a pointer-map page, allocate two new pages
+ ** at the end of the file instead of one. The first allocated page
+ ** becomes a new pointer-map page, the second is used by the caller.
+ */
+ MemPage *pPg = 0;
+ TRACE(("ALLOCATE: %d from end of file (pointer-map page)\n", pBt->nPage));
+ assert( pBt->nPage!=PENDING_BYTE_PAGE(pBt) );
+ rc = btreeGetUnusedPage(pBt, pBt->nPage, &pPg, bNoContent);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3PagerWrite(pPg->pDbPage);
+ releasePage(pPg);
+ }
+ if( rc ) return rc;
+ pBt->nPage++;
+ if( pBt->nPage==PENDING_BYTE_PAGE(pBt) ){ pBt->nPage++; }
+ }
+#endif
+ put4byte(28 + (u8*)pBt->pPage1->aData, pBt->nPage);
+ *pPgno = pBt->nPage;
+
+ assert( *pPgno!=PENDING_BYTE_PAGE(pBt) );
+ rc = btreeGetUnusedPage(pBt, *pPgno, ppPage, bNoContent);
+ if( rc ) return rc;
+ rc = sqlite3PagerWrite((*ppPage)->pDbPage);
+ if( rc!=SQLITE_OK ){
+ releasePage(*ppPage);
+ *ppPage = 0;
+ }
+ TRACE(("ALLOCATE: %d from end of file\n", *pPgno));
+ }
+
+ assert( *pPgno!=PENDING_BYTE_PAGE(pBt) );
+
+end_allocate_page:
+ releasePage(pTrunk);
+ releasePage(pPrevTrunk);
+ assert( rc!=SQLITE_OK || sqlite3PagerPageRefcount((*ppPage)->pDbPage)<=1 );
+ assert( rc!=SQLITE_OK || (*ppPage)->isInit==0 );
+ return rc;
+}
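+
+/*
+** Illustrative sketch only -- not part of SQLite and compiled out with
+** "#if 0". It decodes the freelist trunk page layout that
+** allocateBtreePage() above walks: page 1 stores the first trunk page
+** number at byte offset 32 and the free-page count at offset 36, while
+** each trunk page stores the next trunk page number in bytes 0-3, the
+** number of leaf pointers in bytes 4-7, and the leaf page numbers as
+** 4-byte big-endian values starting at byte 8. The helper name
+** demoDecodeTrunk is hypothetical; aTrunk is assumed to hold a full
+** trunk page image.
+*/
+#if 0
+static int demoDecodeTrunk(
+  const unsigned char *aTrunk,   /* Image of a freelist trunk page */
+  u32 usableSize,                /* Usable bytes per page */
+  Pgno *pNextTrunk,              /* OUT: next trunk page (0 if none) */
+  u32 *pnLeaf                    /* OUT: number of leaves on this trunk */
+){
+  u32 k;
+  *pNextTrunk = get4byte(&aTrunk[0]);
+  k = get4byte(&aTrunk[4]);
+  if( k > usableSize/4 - 2 ){
+    return SQLITE_CORRUPT;       /* Same bound checked by the allocator */
+  }
+  *pnLeaf = k;
+  /* Leaf i (0<=i<k) is then get4byte(&aTrunk[8+i*4]) */
+  return SQLITE_OK;
+}
+#endif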
+
+/*
+** This function is used to add page iPage to the database file free-list.
+** It is assumed that the page is not already a part of the free-list.
+**
+** The value passed as the second argument to this function is optional.
+** If the caller happens to have a pointer to the MemPage object
+** corresponding to page iPage handy, it may pass it as the second value.
+** Otherwise, it may pass NULL.
+**
+** If a pointer to a MemPage object is passed as the second argument,
+** its reference count is not altered by this function.
+*/
+static int freePage2(BtShared *pBt, MemPage *pMemPage, Pgno iPage){
+ MemPage *pTrunk = 0; /* Free-list trunk page */
+ Pgno iTrunk = 0; /* Page number of free-list trunk page */
+ MemPage *pPage1 = pBt->pPage1; /* Local reference to page 1 */
+ MemPage *pPage; /* Page being freed. May be NULL. */
+ int rc; /* Return Code */
+ int nFree; /* Initial number of pages on free-list */
+
+ assert( sqlite3_mutex_held(pBt->mutex) );
+ assert( CORRUPT_DB || iPage>1 );
+ assert( !pMemPage || pMemPage->pgno==iPage );
+
+ if( iPage<2 ) return SQLITE_CORRUPT_BKPT;
+ if( pMemPage ){
+ pPage = pMemPage;
+ sqlite3PagerRef(pPage->pDbPage);
+ }else{
+ pPage = btreePageLookup(pBt, iPage);
+ }
+
+ /* Increment the free page count on pPage1 */
+ rc = sqlite3PagerWrite(pPage1->pDbPage);
+ if( rc ) goto freepage_out;
+ nFree = get4byte(&pPage1->aData[36]);
+ put4byte(&pPage1->aData[36], nFree+1);
+
+ if( pBt->btsFlags & BTS_SECURE_DELETE ){
+ /* If the secure_delete option is enabled, then
+ ** always fully overwrite deleted information with zeros.
+ */
+ if( (!pPage && ((rc = btreeGetPage(pBt, iPage, &pPage, 0))!=0) )
+ || ((rc = sqlite3PagerWrite(pPage->pDbPage))!=0)
+ ){
+ goto freepage_out;
+ }
+ memset(pPage->aData, 0, pPage->pBt->pageSize);
+ }
+
+ /* If the database supports auto-vacuum, write an entry in the pointer-map
+ ** to indicate that the page is free.
+ */
+ if( ISAUTOVACUUM ){
+ ptrmapPut(pBt, iPage, PTRMAP_FREEPAGE, 0, &rc);
+ if( rc ) goto freepage_out;
+ }
+
+ /* Now manipulate the actual database free-list structure. There are two
+ ** possibilities. If the free-list is currently empty, or if the first
+ ** trunk page in the free-list is full, then this page will become a
+ ** new free-list trunk page. Otherwise, it will become a leaf of the
+ ** first trunk page in the current free-list. This block tests if it
+ ** is possible to add the page as a new free-list leaf.
+ */
+ if( nFree!=0 ){
+ u32 nLeaf; /* Initial number of leaf cells on trunk page */
+
+ iTrunk = get4byte(&pPage1->aData[32]);
+ rc = btreeGetPage(pBt, iTrunk, &pTrunk, 0);
+ if( rc!=SQLITE_OK ){
+ goto freepage_out;
+ }
+
+ nLeaf = get4byte(&pTrunk->aData[4]);
+ assert( pBt->usableSize>32 );
+ if( nLeaf > (u32)pBt->usableSize/4 - 2 ){
+ rc = SQLITE_CORRUPT_BKPT;
+ goto freepage_out;
+ }
+ if( nLeaf < (u32)pBt->usableSize/4 - 8 ){
+ /* In this case there is room on the trunk page to insert the page
+ ** being freed as a new leaf.
+ **
+ ** Note that the trunk page is not really full until it contains
+ ** usableSize/4 - 2 entries, not usableSize/4 - 8 entries as we have
+ ** coded. But due to a coding error in versions of SQLite prior to
+ ** 3.6.0, databases with freelist trunk pages holding more than
+ ** usableSize/4 - 8 entries will be reported as corrupt. In order
+ ** to maintain backwards compatibility with older versions of SQLite,
+ ** we will continue to restrict the number of entries to usableSize/4 - 8
+ ** for now. At some point in the future (once everyone has upgraded
+ ** to 3.6.0 or later) we should consider fixing the conditional above
+ ** to read "usableSize/4-2" instead of "usableSize/4-8".
+ **
+ ** EVIDENCE-OF: R-19920-11576 However, newer versions of SQLite still
+ ** avoid using the last six entries in the freelist trunk page array in
+ ** order that database files created by newer versions of SQLite can be
+ ** read by older versions of SQLite.
+ */
+ rc = sqlite3PagerWrite(pTrunk->pDbPage);
+ if( rc==SQLITE_OK ){
+ put4byte(&pTrunk->aData[4], nLeaf+1);
+ put4byte(&pTrunk->aData[8+nLeaf*4], iPage);
+ if( pPage && (pBt->btsFlags & BTS_SECURE_DELETE)==0 ){
+ sqlite3PagerDontWrite(pPage->pDbPage);
+ }
+ rc = btreeSetHasContent(pBt, iPage);
+ }
+ TRACE(("FREE-PAGE: %d leaf on trunk page %d\n",pPage->pgno,pTrunk->pgno));
+ goto freepage_out;
+ }
+ }
+
+ /* If control flows to this point, then it was not possible to add
+ ** the page being freed as a leaf page of the first trunk in the free-list.
+ ** Possibly because the free-list is empty, or possibly because the
+ ** first trunk in the free-list is full. Either way, the page being freed
+ ** will become the new first trunk page in the free-list.
+ */
+ if( pPage==0 && SQLITE_OK!=(rc = btreeGetPage(pBt, iPage, &pPage, 0)) ){
+ goto freepage_out;
+ }
+ rc = sqlite3PagerWrite(pPage->pDbPage);
+ if( rc!=SQLITE_OK ){
+ goto freepage_out;
+ }
+ put4byte(pPage->aData, iTrunk);
+ put4byte(&pPage->aData[4], 0);
+ put4byte(&pPage1->aData[32], iPage);
+ TRACE(("FREE-PAGE: %d new trunk page replacing %d\n", pPage->pgno, iTrunk));
+
+freepage_out:
+ if( pPage ){
+ pPage->isInit = 0;
+ }
+ releasePage(pPage);
+ releasePage(pTrunk);
+ return rc;
+}
+static void freePage(MemPage *pPage, int *pRC){
+ if( (*pRC)==SQLITE_OK ){
+ *pRC = freePage2(pPage->pBt, pPage, pPage->pgno);
+ }
+}
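+
+/*
+** Illustrative sketch only -- not part of SQLite and compiled out with
+** "#if 0". It isolates the decision made by freePage2() above: a freed
+** page becomes a leaf of the first freelist trunk page when the freelist
+** is non-empty and that trunk still has room under the conservative
+** usableSize/4 - 8 limit discussed above; otherwise the freed page
+** becomes the new first trunk page. demoFreedPageBecomesLeaf is a
+** hypothetical name.
+*/
+#if 0
+static int demoFreedPageBecomesLeaf(
+  u32 nFree,        /* Pages currently on the freelist */
+  u32 nLeaf,        /* Leaves already on the first trunk page */
+  u32 usableSize    /* Usable bytes per page */
+){
+  return nFree!=0 && nLeaf < usableSize/4 - 8;
+}
+#endif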
+
+/*
+** Free any overflow pages associated with the given Cell. Write size
+** information about the cell, including the number of bytes held locally
+** on the original page (omitting overflow), into *pInfo.
+*/
+static int clearCell(
+ MemPage *pPage, /* The page that contains the Cell */
+ unsigned char *pCell, /* First byte of the Cell */
+ CellInfo *pInfo /* Size information about the cell */
+){
+ BtShared *pBt = pPage->pBt;
+ Pgno ovflPgno;
+ int rc;
+ int nOvfl;
+ u32 ovflPageSize;
+
+ assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+ pPage->xParseCell(pPage, pCell, pInfo);
+ if( pInfo->nLocal==pInfo->nPayload ){
+ return SQLITE_OK; /* No overflow pages. Return without doing anything */
+ }
+ if( pCell+pInfo->nSize-1 > pPage->aData+pPage->maskPage ){
+ return SQLITE_CORRUPT_BKPT; /* Cell extends past end of page */
+ }
+ ovflPgno = get4byte(pCell + pInfo->nSize - 4);
+ assert( pBt->usableSize > 4 );
+ ovflPageSize = pBt->usableSize - 4;
+ nOvfl = (pInfo->nPayload - pInfo->nLocal + ovflPageSize - 1)/ovflPageSize;
+ assert( nOvfl>0 ||
+ (CORRUPT_DB && (pInfo->nPayload + ovflPageSize)<ovflPageSize)
+ );
+ while( nOvfl-- ){
+ Pgno iNext = 0;
+ MemPage *pOvfl = 0;
+ if( ovflPgno<2 || ovflPgno>btreePagecount(pBt) ){
+ /* 0 is not a legal page number and page 1 cannot be an
+ ** overflow page. Therefore if ovflPgno<2 or past the end of the
+ ** file the database must be corrupt. */
+ return SQLITE_CORRUPT_BKPT;
+ }
+ if( nOvfl ){
+ rc = getOverflowPage(pBt, ovflPgno, &pOvfl, &iNext);
+ if( rc ) return rc;
+ }
+
+ if( ( pOvfl || ((pOvfl = btreePageLookup(pBt, ovflPgno))!=0) )
+ && sqlite3PagerPageRefcount(pOvfl->pDbPage)!=1
+ ){
+ /* There is no reason any cursor should have an outstanding reference
+ ** to an overflow page belonging to a cell that is being deleted/updated.
+ ** So if there exists more than one reference to this page, then it
+ ** must not really be an overflow page and the database must be corrupt.
+ ** It is helpful to detect this before calling freePage2(), as
+ ** freePage2() may zero the page contents if secure-delete mode is
+ ** enabled. If this 'overflow' page happens to be a page that the
+ ** caller is iterating through or using in some other way, this
+ ** can be problematic.
+ */
+ rc = SQLITE_CORRUPT_BKPT;
+ }else{
+ rc = freePage2(pBt, pOvfl, ovflPgno);
+ }
+
+ if( pOvfl ){
+ sqlite3PagerUnref(pOvfl->pDbPage);
+ }
+ if( rc ) return rc;
+ ovflPgno = iNext;
+ }
+ return SQLITE_OK;
+}
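+
+/*
+** Illustrative sketch only -- not part of SQLite and compiled out with
+** "#if 0". It restates the overflow-page count computed by clearCell()
+** above: each overflow page carries usableSize-4 bytes of payload because
+** its first 4 bytes hold the page number of the next overflow page (zero
+** on the last page of the chain). demoOvflPageCount is a hypothetical
+** name.
+*/
+#if 0
+static u32 demoOvflPageCount(
+  u32 nPayload,     /* Total payload bytes in the cell */
+  u32 nLocal,       /* Payload bytes stored locally on the b-tree page */
+  u32 usableSize    /* Usable bytes per database page */
+){
+  u32 ovflPageSize = usableSize - 4;
+  if( nPayload<=nLocal ) return 0;          /* Everything fits locally */
+  return (nPayload - nLocal + ovflPageSize - 1) / ovflPageSize;
+}
+#endif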
+
+/*
+** Create the byte sequence used to represent a cell on page pPage
+** and write that byte sequence into pCell[]. Overflow pages are
+** allocated and filled in as necessary. The calling procedure
+** is responsible for making sure sufficient space has been allocated
+** for pCell[].
+**
+** Note that pCell does not necessarily need to point to the pPage->aData
+** area. pCell might point to some temporary storage. The cell will
+** be constructed in this temporary area then copied into pPage->aData
+** later.
+*/
+static int fillInCell(
+ MemPage *pPage, /* The page that contains the cell */
+ unsigned char *pCell, /* Complete text of the cell */
+ const BtreePayload *pX, /* Payload with which to construct the cell */
+ int *pnSize /* Write cell size here */
+){
+ int nPayload;
+ const u8 *pSrc;
+ int nSrc, n, rc;
+ int spaceLeft;
+ MemPage *pOvfl = 0;
+ MemPage *pToRelease = 0;
+ unsigned char *pPrior;
+ unsigned char *pPayload;
+ BtShared *pBt = pPage->pBt;
+ Pgno pgnoOvfl = 0;
+ int nHeader;
+
+ assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+
+ /* pPage is not necessarily writeable since pCell might be auxiliary
+ ** buffer space that is separate from the pPage buffer area */
+ assert( pCell<pPage->aData || pCell>=&pPage->aData[pBt->pageSize]
+ || sqlite3PagerIswriteable(pPage->pDbPage) );
+
+ /* Fill in the header. */
+ nHeader = pPage->childPtrSize;
+ if( pPage->intKey ){
+ nPayload = pX->nData + pX->nZero;
+ pSrc = pX->pData;
+ nSrc = pX->nData;
+ assert( pPage->intKeyLeaf ); /* fillInCell() only called for leaves */
+ nHeader += putVarint32(&pCell[nHeader], nPayload);
+ nHeader += putVarint(&pCell[nHeader], *(u64*)&pX->nKey);
+ }else{
+ assert( pX->nKey<=0x7fffffff && pX->pKey!=0 );
+ nSrc = nPayload = (int)pX->nKey;
+ pSrc = pX->pKey;
+ nHeader += putVarint32(&pCell[nHeader], nPayload);
+ }
+
+ /* Fill in the payload */
+ if( nPayload<=pPage->maxLocal ){
+ n = nHeader + nPayload;
+ testcase( n==3 );
+ testcase( n==4 );
+ if( n<4 ) n = 4;
+ *pnSize = n;
+ spaceLeft = nPayload;
+ pPrior = pCell;
+ }else{
+ int mn = pPage->minLocal;
+ n = mn + (nPayload - mn) % (pPage->pBt->usableSize - 4);
+ testcase( n==pPage->maxLocal );
+ testcase( n==pPage->maxLocal+1 );
+ if( n > pPage->maxLocal ) n = mn;
+ spaceLeft = n;
+ *pnSize = n + nHeader + 4;
+ pPrior = &pCell[nHeader+n];
+ }
+ pPayload = &pCell[nHeader];
+
+ /* At this point variables should be set as follows:
+ **
+ ** nPayload Total payload size in bytes
+ ** pPayload Begin writing payload here
+ ** spaceLeft Space available at pPayload. If nPayload>spaceLeft,
+ ** that means content must spill into overflow pages.
+ ** *pnSize Size of the local cell (not counting overflow pages)
+ ** pPrior Where to write the pgno of the first overflow page
+ **
+ ** Use a call to btreeParseCellPtr() to verify that the values above
+ ** were computed correctly.
+ */
+#if SQLITE_DEBUG
+ {
+ CellInfo info;
+ pPage->xParseCell(pPage, pCell, &info);
+ assert( nHeader==(int)(info.pPayload - pCell) );
+ assert( info.nKey==pX->nKey );
+ assert( *pnSize == info.nSize );
+ assert( spaceLeft == info.nLocal );
+ }
+#endif
+
+ /* Write the payload into the local Cell and any extra into overflow pages */
+ while( nPayload>0 ){
+ if( spaceLeft==0 ){
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ Pgno pgnoPtrmap = pgnoOvfl; /* Overflow page pointer-map entry page */
+ if( pBt->autoVacuum ){
+ do{
+ pgnoOvfl++;
+ } while(
+ PTRMAP_ISPAGE(pBt, pgnoOvfl) || pgnoOvfl==PENDING_BYTE_PAGE(pBt)
+ );
+ }
+#endif
+ rc = allocateBtreePage(pBt, &pOvfl, &pgnoOvfl, pgnoOvfl, 0);
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ /* If the database supports auto-vacuum, and the second or subsequent
+ ** overflow page is being allocated, add an entry to the pointer-map
+ ** for that page now.
+ **
+ ** If this is the first overflow page, then write a partial entry
+ ** to the pointer-map. If we write nothing to this pointer-map slot,
+ ** then the optimistic overflow chain processing in clearCell()
+ ** may misinterpret the uninitialized values and delete the
+ ** wrong pages from the database.
+ */
+ if( pBt->autoVacuum && rc==SQLITE_OK ){
+ u8 eType = (pgnoPtrmap?PTRMAP_OVERFLOW2:PTRMAP_OVERFLOW1);
+ ptrmapPut(pBt, pgnoOvfl, eType, pgnoPtrmap, &rc);
+ if( rc ){
+ releasePage(pOvfl);
+ }
+ }
+#endif
+ if( rc ){
+ releasePage(pToRelease);
+ return rc;
+ }
+
+ /* If pToRelease is not zero then pPrior points into the data area
+ ** of pToRelease. Make sure pToRelease is still writeable. */
+ assert( pToRelease==0 || sqlite3PagerIswriteable(pToRelease->pDbPage) );
+
+ /* If pPrior is part of the data area of pPage, then make sure pPage
+ ** is still writeable */
+ assert( pPrior<pPage->aData || pPrior>=&pPage->aData[pBt->pageSize]
+ || sqlite3PagerIswriteable(pPage->pDbPage) );
+
+ put4byte(pPrior, pgnoOvfl);
+ releasePage(pToRelease);
+ pToRelease = pOvfl;
+ pPrior = pOvfl->aData;
+ put4byte(pPrior, 0);
+ pPayload = &pOvfl->aData[4];
+ spaceLeft = pBt->usableSize - 4;
+ }
+ n = nPayload;
+ if( n>spaceLeft ) n = spaceLeft;
+
+ /* If pToRelease is not zero then pPayload points into the data area
+ ** of pToRelease. Make sure pToRelease is still writeable. */
+ assert( pToRelease==0 || sqlite3PagerIswriteable(pToRelease->pDbPage) );
+
+ /* If pPayload is part of the data area of pPage, then make sure pPage
+ ** is still writeable */
+ assert( pPayload<pPage->aData || pPayload>=&pPage->aData[pBt->pageSize]
+ || sqlite3PagerIswriteable(pPage->pDbPage) );
+
+ if( nSrc>0 ){
+ if( n>nSrc ) n = nSrc;
+ assert( pSrc );
+ memcpy(pPayload, pSrc, n);
+ }else{
+ memset(pPayload, 0, n);
+ }
+ nPayload -= n;
+ pPayload += n;
+ pSrc += n;
+ nSrc -= n;
+ spaceLeft -= n;
+ }
+ releasePage(pToRelease);
+ return SQLITE_OK;
+}
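+
+/*
+** Illustrative sketch only -- not part of SQLite and compiled out with
+** "#if 0". It isolates the local-payload split rule used by fillInCell()
+** above: if the payload exceeds maxLocal, keep
+** minLocal + (nPayload-minLocal) % (usableSize-4) bytes on the page, and
+** fall back to minLocal when even that exceeds maxLocal; the remainder
+** spills to overflow pages. demoLocalPayload is a hypothetical name.
+*/
+#if 0
+static int demoLocalPayload(
+  int nPayload,     /* Total payload size in bytes */
+  int minLocal,     /* The page's minimum local payload (pPage->minLocal) */
+  int maxLocal,     /* The page's maximum local payload (pPage->maxLocal) */
+  int usableSize    /* Usable bytes per page (pBt->usableSize) */
+){
+  int n;
+  if( nPayload<=maxLocal ) return nPayload; /* No overflow pages needed */
+  n = minLocal + (nPayload - minLocal) % (usableSize - 4);
+  if( n>maxLocal ) n = minLocal;
+  return n;
+}
+#endif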
+
+/*
+** Remove the i-th cell from pPage. This routine affects pPage only.
+** The cell content is not freed or deallocated. It is assumed that
+** the cell content has been copied someplace else. This routine just
+** removes the reference to the cell from pPage.
+**
+** "sz" must be the number of bytes in the cell.
+*/
+static void dropCell(MemPage *pPage, int idx, int sz, int *pRC){
+ u32 pc; /* Offset to cell content of cell being deleted */
+ u8 *data; /* pPage->aData */
+ u8 *ptr; /* Used to move bytes around within data[] */
+ int rc; /* The return code */
+ int hdr; /* Beginning of the header. 0 for most pages, 100 for page 1 */
+
+ if( *pRC ) return;
+ assert( idx>=0 && idx<pPage->nCell );
+ assert( CORRUPT_DB || sz==cellSize(pPage, idx) );
+ assert( sqlite3PagerIswriteable(pPage->pDbPage) );
+ assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+ data = pPage->aData;
+ ptr = &pPage->aCellIdx[2*idx];
+ pc = get2byte(ptr);
+ hdr = pPage->hdrOffset;
+ testcase( pc==get2byte(&data[hdr+5]) );
+ testcase( pc+sz==pPage->pBt->usableSize );
+ if( pc < (u32)get2byte(&data[hdr+5]) || pc+sz > pPage->pBt->usableSize ){
+ *pRC = SQLITE_CORRUPT_BKPT;
+ return;
+ }
+ rc = freeSpace(pPage, pc, sz);
+ if( rc ){
+ *pRC = rc;
+ return;
+ }
+ pPage->nCell--;
+ if( pPage->nCell==0 ){
+ memset(&data[hdr+1], 0, 4);
+ data[hdr+7] = 0;
+ put2byte(&data[hdr+5], pPage->pBt->usableSize);
+ pPage->nFree = pPage->pBt->usableSize - pPage->hdrOffset
+ - pPage->childPtrSize - 8;
+ }else{
+ memmove(ptr, ptr+2, 2*(pPage->nCell - idx));
+ put2byte(&data[hdr+3], pPage->nCell);
+ pPage->nFree += 2;
+ }
+}
+
+/*
+** Insert a new cell on pPage at cell index "i". pCell points to the
+** content of the cell.
+**
+** If the cell content will fit on the page, then put it there. If it
+** will not fit, then make a copy of the cell content into pTemp if
+** pTemp is not null. Regardless of pTemp, allocate a new entry
+** in pPage->apOvfl[] and make it point to the cell content (either
+** in pTemp or the original pCell) and also record its index.
+** Allocating a new entry in pPage->apOvfl[] implies that
+** pPage->nOverflow is incremented.
+**
+** *pRC must be SQLITE_OK when this routine is called.
+*/
+static void insertCell(
+ MemPage *pPage, /* Page into which we are copying */
+ int i, /* New cell becomes the i-th cell of the page */
+ u8 *pCell, /* Content of the new cell */
+ int sz, /* Bytes of content in pCell */
+ u8 *pTemp, /* Temp storage space for pCell, if needed */
+ Pgno iChild, /* If non-zero, replace first 4 bytes with this value */
+ int *pRC /* Read and write return code from here */
+){
+ int idx = 0; /* Where to write new cell content in data[] */
+ int j; /* Loop counter */
+ u8 *data; /* The content of the whole page */
+ u8 *pIns; /* The point in pPage->aCellIdx[] where the new cell is inserted */
+
+ assert( *pRC==SQLITE_OK );
+ assert( i>=0 && i<=pPage->nCell+pPage->nOverflow );
+ assert( MX_CELL(pPage->pBt)<=10921 );
+ assert( pPage->nCell<=MX_CELL(pPage->pBt) || CORRUPT_DB );
+ assert( pPage->nOverflow<=ArraySize(pPage->apOvfl) );
+ assert( ArraySize(pPage->apOvfl)==ArraySize(pPage->aiOvfl) );
+ assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+ /* The cell should normally be sized correctly. However, when moving a
+ ** malformed cell from a leaf page to an interior page, if the cell size
+ ** wanted to be less than 4 but got rounded up to 4 on the leaf, then size
+ ** might be less than 8 (leaf-size + pointer) on the interior node. Hence
+ ** the term after the || in the following assert(). */
+ assert( sz==pPage->xCellSize(pPage, pCell) || (sz==8 && iChild>0) );
+ if( pPage->nOverflow || sz+2>pPage->nFree ){
+ if( pTemp ){
+ memcpy(pTemp, pCell, sz);
+ pCell = pTemp;
+ }
+ if( iChild ){
+ put4byte(pCell, iChild);
+ }
+ j = pPage->nOverflow++;
+ /* Comparison against ArraySize-1 since we hold back one extra slot
+ ** as a contingency. In other words, we never need more than 3 overflow
+ ** slots but 4 are allocated, just to be safe. */
+ assert( j < ArraySize(pPage->apOvfl)-1 );
+ pPage->apOvfl[j] = pCell;
+ pPage->aiOvfl[j] = (u16)i;
+
+ /* When multiple overflows occur, they are always sequential and in
+ ** sorted order. These invariants arise because multiple overflows can
+ ** only occur when inserting divider cells into the parent page during
+ ** balancing, and the dividers are adjacent and sorted.
+ */
+ assert( j==0 || pPage->aiOvfl[j-1]<(u16)i ); /* Overflows in sorted order */
+ assert( j==0 || i==pPage->aiOvfl[j-1]+1 ); /* Overflows are sequential */
+ }else{
+ int rc = sqlite3PagerWrite(pPage->pDbPage);
+ if( rc!=SQLITE_OK ){
+ *pRC = rc;
+ return;
+ }
+ assert( sqlite3PagerIswriteable(pPage->pDbPage) );
+ data = pPage->aData;
+ assert( &data[pPage->cellOffset]==pPage->aCellIdx );
+ rc = allocateSpace(pPage, sz, &idx);
+ if( rc ){ *pRC = rc; return; }
+ /* The allocateSpace() routine guarantees the following properties
+ ** if it returns successfully */
+ assert( idx >= 0 );
+ assert( idx >= pPage->cellOffset+2*pPage->nCell+2 || CORRUPT_DB );
+ assert( idx+sz <= (int)pPage->pBt->usableSize );
+ pPage->nFree -= (u16)(2 + sz);
+ memcpy(&data[idx], pCell, sz);
+ if( iChild ){
+ put4byte(&data[idx], iChild);
+ }
+ pIns = pPage->aCellIdx + i*2;
+ memmove(pIns+2, pIns, 2*(pPage->nCell - i));
+ put2byte(pIns, idx);
+ pPage->nCell++;
+ /* increment the cell count */
+ if( (++data[pPage->hdrOffset+4])==0 ) data[pPage->hdrOffset+3]++;
+ assert( get2byte(&data[pPage->hdrOffset+3])==pPage->nCell );
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ if( pPage->pBt->autoVacuum ){
+ /* The cell may contain a pointer to an overflow page. If so, write
+ ** the entry for the overflow page into the pointer map.
+ */
+ ptrmapPutOvflPtr(pPage, pCell, pRC);
+ }
+#endif
+ }
+}
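+
+/*
+** Illustrative sketch only -- not part of SQLite and compiled out with
+** "#if 0". It isolates the byte-wise increment of the big-endian 16-bit
+** cell count performed near the end of insertCell() above: bump the low
+** byte and carry into the high byte when the low byte wraps to zero,
+** which is equivalent to put2byte(p, get2byte(p)+1). demoIncr16be is a
+** hypothetical name; p points at the 2-byte big-endian counter.
+*/
+#if 0
+static void demoIncr16be(unsigned char *p){
+  if( (++p[1])==0 ) p[0]++;   /* p[0] is the high byte, p[1] the low byte */
+}
+#endif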
+
+/*
+** A CellArray object contains a cache of pointers and sizes for a
+** consecutive sequence of cells that might be held on multiple pages.
+*/
+typedef struct CellArray CellArray;
+struct CellArray {
+ int nCell; /* Number of cells in apCell[] */
+ MemPage *pRef; /* Reference page */
+ u8 **apCell; /* Pointers to all cells being balanced */
+ u16 *szCell; /* Local size of all cells in apCell[] */
+};
+
+/*
+** Make sure the cell sizes at idx, idx+1, ..., idx+N-1 have been
+** computed.
+*/
+static void populateCellCache(CellArray *p, int idx, int N){
+ assert( idx>=0 && idx+N<=p->nCell );
+ while( N>0 ){
+ assert( p->apCell[idx]!=0 );
+ if( p->szCell[idx]==0 ){
+ p->szCell[idx] = p->pRef->xCellSize(p->pRef, p->apCell[idx]);
+ }else{
+ assert( CORRUPT_DB ||
+ p->szCell[idx]==p->pRef->xCellSize(p->pRef, p->apCell[idx]) );
+ }
+ idx++;
+ N--;
+ }
+}
+
+/*
+** Return the size of the Nth element of the cell array
+*/
+static SQLITE_NOINLINE u16 computeCellSize(CellArray *p, int N){
+ assert( N>=0 && N<p->nCell );
+ assert( p->szCell[N]==0 );
+ p->szCell[N] = p->pRef->xCellSize(p->pRef, p->apCell[N]);
+ return p->szCell[N];
+}
+static u16 cachedCellSize(CellArray *p, int N){
+ assert( N>=0 && N<p->nCell );
+ if( p->szCell[N] ) return p->szCell[N];
+ return computeCellSize(p, N);
+}
+
+/*
+** Array apCell[] contains pointers to nCell b-tree page cells. The
+** szCell[] array contains the size in bytes of each cell. This function
+** replaces the current contents of page pPg with the contents of the cell
+** array.
+**
+** Some of the cells in apCell[] may currently be stored in pPg. This
+** function works around problems caused by this by making a copy of any
+** such cells before overwriting the page data.
+**
+** The MemPage.nFree field is invalidated by this function. It is the
+** responsibility of the caller to set it correctly.
+*/
+static int rebuildPage(
+ MemPage *pPg, /* Edit this page */
+ int nCell, /* Final number of cells on page */
+ u8 **apCell, /* Array of cells */
+ u16 *szCell /* Array of cell sizes */
+){
+ const int hdr = pPg->hdrOffset; /* Offset of header on pPg */
+ u8 * const aData = pPg->aData; /* Pointer to data for pPg */
+ const int usableSize = pPg->pBt->usableSize;
+ u8 * const pEnd = &aData[usableSize];
+ int i;
+ u8 *pCellptr = pPg->aCellIdx;
+ u8 *pTmp = sqlite3PagerTempSpace(pPg->pBt->pPager);
+ u8 *pData;
+
+ i = get2byte(&aData[hdr+5]);
+ memcpy(&pTmp[i], &aData[i], usableSize - i);
+
+ pData = pEnd;
+ for(i=0; i<nCell; i++){
+ u8 *pCell = apCell[i];
+ if( SQLITE_WITHIN(pCell,aData,pEnd) ){
+ pCell = &pTmp[pCell - aData];
+ }
+ pData -= szCell[i];
+ put2byte(pCellptr, (pData - aData));
+ pCellptr += 2;
+ if( pData < pCellptr ) return SQLITE_CORRUPT_BKPT;
+ memcpy(pData, pCell, szCell[i]);
+ assert( szCell[i]==pPg->xCellSize(pPg, pCell) || CORRUPT_DB );
+ testcase( szCell[i]!=pPg->xCellSize(pPg,pCell) );
+ }
+
+ /* The pPg->nFree field is now set incorrectly. The caller will fix it. */
+ pPg->nCell = nCell;
+ pPg->nOverflow = 0;
+
+ put2byte(&aData[hdr+1], 0);
+ put2byte(&aData[hdr+3], pPg->nCell);
+ put2byte(&aData[hdr+5], pData - aData);
+ aData[hdr+7] = 0x00;
+ return SQLITE_OK;
+}
+
+/*
+** Array apCell[] contains nCell pointers to b-tree cells. Array szCell
+** contains the size in bytes of each such cell. This function attempts to
+** add the cells stored in the array to page pPg. If it cannot (because
+** the page needs to be defragmented before the cells will fit), non-zero
+** is returned. Otherwise, if the cells are added successfully, zero is
+** returned.
+**
+** Argument pCellptr points to the first entry in the cell-pointer array
+** (part of page pPg) to populate. After cell apCell[0] is written to the
+** page body, a 16-bit offset is written to pCellptr. And so on, for each
+** cell in the array. It is the responsibility of the caller to ensure
+** that it is safe to overwrite this part of the cell-pointer array.
+**
+** When this function is called, *ppData points to the start of the
+** content area on page pPg. If the size of the content area is extended,
+** *ppData is updated to point to the new start of the content area
+** before returning.
+**
+** Finally, argument pBegin points to the byte immediately following the
+** end of the space required by this page for the cell-pointer area (for
+** all cells - not just those inserted by the current call). If the content
+** area must be extended to before this point in order to accommodate all
+** cells in apCell[], then the cells do not fit and non-zero is returned.
+*/
+static int pageInsertArray(
+ MemPage *pPg, /* Page to add cells to */
+ u8 *pBegin, /* End of cell-pointer array */
+ u8 **ppData, /* IN/OUT: Page content-area pointer */
+ u8 *pCellptr, /* Pointer to cell-pointer area */
+ int iFirst, /* Index of first cell to add */
+ int nCell, /* Number of cells to add to pPg */
+ CellArray *pCArray /* Array of cells */
+){
+ int i;
+ u8 *aData = pPg->aData;
+ u8 *pData = *ppData;
+ int iEnd = iFirst + nCell;
+ assert( CORRUPT_DB || pPg->hdrOffset==0 ); /* Never called on page 1 */
+ for(i=iFirst; i<iEnd; i++){
+ int sz, rc;
+ u8 *pSlot;
+ sz = cachedCellSize(pCArray, i);
+ if( (aData[1]==0 && aData[2]==0) || (pSlot = pageFindSlot(pPg,sz,&rc))==0 ){
+ if( (pData - pBegin)<sz ) return 1;
+ pData -= sz;
+ pSlot = pData;
+ }
+ /* pSlot and pCArray->apCell[i] will never overlap on a well-formed
+ ** database. But they might for a corrupt database. Hence use memmove()
+ ** since memcpy() raises SIGABRT with overlapping buffers on OpenBSD */
+ assert( (pSlot+sz)<=pCArray->apCell[i]
+ || pSlot>=(pCArray->apCell[i]+sz)
+ || CORRUPT_DB );
+ memmove(pSlot, pCArray->apCell[i], sz);
+ put2byte(pCellptr, (pSlot - aData));
+ pCellptr += 2;
+ }
+ *ppData = pData;
+ return 0;
+}
+
+/*
+** Array apCell[] contains nCell pointers to b-tree cells. Array szCell
+** contains the size in bytes of each such cell. This function adds the
+** space associated with each cell in the array that is currently stored
+** within the body of pPg to the pPg free-list. The cell-pointers and other
+** fields of the page are not updated.
+**
+** This function returns the total number of cells added to the free-list.
+*/
+static int pageFreeArray(
+ MemPage *pPg, /* Page to edit */
+ int iFirst, /* First cell to delete */
+ int nCell, /* Cells to delete */
+ CellArray *pCArray /* Array of cells */
+){
+ u8 * const aData = pPg->aData;
+ u8 * const pEnd = &aData[pPg->pBt->usableSize];
+ u8 * const pStart = &aData[pPg->hdrOffset + 8 + pPg->childPtrSize];
+ int nRet = 0;
+ int i;
+ int iEnd = iFirst + nCell;
+ u8 *pFree = 0;
+ int szFree = 0;
+
+ for(i=iFirst; i<iEnd; i++){
+ u8 *pCell = pCArray->apCell[i];
+ if( SQLITE_WITHIN(pCell, pStart, pEnd) ){
+ int sz;
+ /* No need to use cachedCellSize() here. The sizes of all cells that
+ ** are to be freed have already been computed while deciding which
+ ** cells need freeing */
+ sz = pCArray->szCell[i]; assert( sz>0 );
+ if( pFree!=(pCell + sz) ){
+ if( pFree ){
+ assert( pFree>aData && (pFree - aData)<65536 );
+ freeSpace(pPg, (u16)(pFree - aData), szFree);
+ }
+ pFree = pCell;
+ szFree = sz;
+ if( pFree+sz>pEnd ) return 0;
+ }else{
+ pFree = pCell;
+ szFree += sz;
+ }
+ nRet++;
+ }
+ }
+ if( pFree ){
+ assert( pFree>aData && (pFree - aData)<65536 );
+ freeSpace(pPg, (u16)(pFree - aData), szFree);
+ }
+ return nRet;
+}
+
+/*
+** apCell[] and szCell[] contains pointers to and sizes of all cells in the
+** pages being balanced. The current page, pPg, has pPg->nCell cells starting
+** with apCell[iOld]. After balancing, this page should hold nNew cells
+** starting at apCell[iNew].
+**
+** This routine makes the necessary adjustments to pPg so that it contains
+** the correct cells after being balanced.
+**
+** The pPg->nFree field is invalid when this function returns. It is the
+** responsibility of the caller to set it correctly.
+*/
+static int editPage(
+ MemPage *pPg, /* Edit this page */
+ int iOld, /* Index of first cell currently on page */
+ int iNew, /* Index of new first cell on page */
+ int nNew, /* Final number of cells on page */
+ CellArray *pCArray /* Array of cells and sizes */
+){
+ u8 * const aData = pPg->aData;
+ const int hdr = pPg->hdrOffset;
+ u8 *pBegin = &pPg->aCellIdx[nNew * 2];
+ int nCell = pPg->nCell; /* Cells stored on pPg */
+ u8 *pData;
+ u8 *pCellptr;
+ int i;
+ int iOldEnd = iOld + pPg->nCell + pPg->nOverflow;
+ int iNewEnd = iNew + nNew;
+
+#ifdef SQLITE_DEBUG
+ u8 *pTmp = sqlite3PagerTempSpace(pPg->pBt->pPager);
+ memcpy(pTmp, aData, pPg->pBt->usableSize);
+#endif
+
+ /* Remove cells from the start and end of the page */
+ if( iOld<iNew ){
+ int nShift = pageFreeArray(pPg, iOld, iNew-iOld, pCArray);
+ memmove(pPg->aCellIdx, &pPg->aCellIdx[nShift*2], nCell*2);
+ nCell -= nShift;
+ }
+ if( iNewEnd < iOldEnd ){
+ nCell -= pageFreeArray(pPg, iNewEnd, iOldEnd - iNewEnd, pCArray);
+ }
+
+ pData = &aData[get2byteNotZero(&aData[hdr+5])];
+ if( pData<pBegin ) goto editpage_fail;
+
+ /* Add cells to the start of the page */
+ if( iNew<iOld ){
+ int nAdd = MIN(nNew,iOld-iNew);
+ assert( (iOld-iNew)<nNew || nCell==0 || CORRUPT_DB );
+ pCellptr = pPg->aCellIdx;
+ memmove(&pCellptr[nAdd*2], pCellptr, nCell*2);
+ if( pageInsertArray(
+ pPg, pBegin, &pData, pCellptr,
+ iNew, nAdd, pCArray
+ ) ) goto editpage_fail;
+ nCell += nAdd;
+ }
+
+ /* Add any overflow cells */
+ for(i=0; i<pPg->nOverflow; i++){
+ int iCell = (iOld + pPg->aiOvfl[i]) - iNew;
+ if( iCell>=0 && iCell<nNew ){
+ pCellptr = &pPg->aCellIdx[iCell * 2];
+ memmove(&pCellptr[2], pCellptr, (nCell - iCell) * 2);
+ nCell++;
+ if( pageInsertArray(
+ pPg, pBegin, &pData, pCellptr,
+ iCell+iNew, 1, pCArray
+ ) ) goto editpage_fail;
+ }
+ }
+
+ /* Append cells to the end of the page */
+ pCellptr = &pPg->aCellIdx[nCell*2];
+ if( pageInsertArray(
+ pPg, pBegin, &pData, pCellptr,
+ iNew+nCell, nNew-nCell, pCArray
+ ) ) goto editpage_fail;
+
+ pPg->nCell = nNew;
+ pPg->nOverflow = 0;
+
+ put2byte(&aData[hdr+3], pPg->nCell);
+ put2byte(&aData[hdr+5], pData - aData);
+
+#ifdef SQLITE_DEBUG
+ for(i=0; i<nNew && !CORRUPT_DB; i++){
+ u8 *pCell = pCArray->apCell[i+iNew];
+ int iOff = get2byteAligned(&pPg->aCellIdx[i*2]);
+ if( SQLITE_WITHIN(pCell, aData, &aData[pPg->pBt->usableSize]) ){
+ pCell = &pTmp[pCell - aData];
+ }
+ assert( 0==memcmp(pCell, &aData[iOff],
+ pCArray->pRef->xCellSize(pCArray->pRef, pCArray->apCell[i+iNew])) );
+ }
+#endif
+
+ return SQLITE_OK;
+ editpage_fail:
+ /* Unable to edit this page. Rebuild it from scratch instead. */
+ populateCellCache(pCArray, iNew, nNew);
+ return rebuildPage(pPg, nNew, &pCArray->apCell[iNew], &pCArray->szCell[iNew]);
+}
+
+/*
+** The following parameters determine how many adjacent pages get involved
+** in a balancing operation. NN is the number of neighbors on either side
+** of the page that participate in the balancing operation. NB is the
+** total number of pages that participate, including the target page and
+** NN neighbors on either side.
+**
+** The minimum value of NN is 1 (of course). Increasing NN above 1
+** (to 2 or 3) gives a modest improvement in SELECT and DELETE performance
+** in exchange for a larger degradation in INSERT and UPDATE performance.
+** The value of NN==1 appears to give the best results overall.
+*/
+#define NN 1 /* Number of neighbors on either side of pPage */
+#define NB (NN*2+1) /* Total pages involved in the balance */
+
+
+#ifndef SQLITE_OMIT_QUICKBALANCE
+/*
+** This version of balance() handles the common special case where
+** a new entry is being inserted on the extreme right-end of the
+** tree, in other words, when the new entry will become the largest
+** entry in the tree.
+**
+** Instead of trying to balance the 3 right-most leaf pages, just add
+** a new page to the right-hand side and put the one new entry in
+** that page. This leaves the right side of the tree somewhat
+** unbalanced. But odds are that we will be inserting new entries
+** at the end soon afterwards so the nearly empty page will quickly
+** fill up. On average.
+**
+** pPage is the leaf page which is the right-most page in the tree.
+** pParent is its parent. pPage must have a single overflow entry
+** which is also the right-most entry on the page.
+**
+** The pSpace buffer is used to store a temporary copy of the divider
+** cell that will be inserted into pParent. Such a cell consists of a 4
+** byte page number followed by a variable length integer. In other
+** words, at most 13 bytes. Hence the pSpace buffer must be at
+** least 13 bytes in size.
+*/
+static int balance_quick(MemPage *pParent, MemPage *pPage, u8 *pSpace){
+ BtShared *const pBt = pPage->pBt; /* B-Tree Database */
+ MemPage *pNew; /* Newly allocated page */
+ int rc; /* Return Code */
+ Pgno pgnoNew; /* Page number of pNew */
+
+ assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+ assert( sqlite3PagerIswriteable(pParent->pDbPage) );
+ assert( pPage->nOverflow==1 );
+
+ /* This error condition is now caught prior to reaching this function */
+ if( NEVER(pPage->nCell==0) ) return SQLITE_CORRUPT_BKPT;
+
+ /* Allocate a new page. This page will become the right-sibling of
+ ** pPage. Make the parent page writable, so that the new divider cell
+ ** may be inserted. If both these operations are successful, proceed.
+ */
+ rc = allocateBtreePage(pBt, &pNew, &pgnoNew, 0, 0);
+
+ if( rc==SQLITE_OK ){
+
+ u8 *pOut = &pSpace[4];
+ u8 *pCell = pPage->apOvfl[0];
+ u16 szCell = pPage->xCellSize(pPage, pCell);
+ u8 *pStop;
+
+ assert( sqlite3PagerIswriteable(pNew->pDbPage) );
+ assert( pPage->aData[0]==(PTF_INTKEY|PTF_LEAFDATA|PTF_LEAF) );
+ zeroPage(pNew, PTF_INTKEY|PTF_LEAFDATA|PTF_LEAF);
+ rc = rebuildPage(pNew, 1, &pCell, &szCell);
+ if( NEVER(rc) ) return rc;
+ pNew->nFree = pBt->usableSize - pNew->cellOffset - 2 - szCell;
+
+ /* If this is an auto-vacuum database, update the pointer map
+ ** with entries for the new page, and any pointer from the
+ ** cell on the page to an overflow page. If either of these
+ ** operations fails, the return code is set, but the contents
+    ** of the parent page are still manipulated by the code below.
+ ** That is Ok, at this point the parent page is guaranteed to
+ ** be marked as dirty. Returning an error code will cause a
+ ** rollback, undoing any changes made to the parent page.
+ */
+ if( ISAUTOVACUUM ){
+ ptrmapPut(pBt, pgnoNew, PTRMAP_BTREE, pParent->pgno, &rc);
+ if( szCell>pNew->minLocal ){
+ ptrmapPutOvflPtr(pNew, pCell, &rc);
+ }
+ }
+
+ /* Create a divider cell to insert into pParent. The divider cell
+ ** consists of a 4-byte page number (the page number of pPage) and
+ ** a variable length key value (which must be the same value as the
+ ** largest key on pPage).
+ **
+ ** To find the largest key value on pPage, first find the right-most
+ ** cell on pPage. The first two fields of this cell are the
+ ** record-length (a variable length integer at most 32-bits in size)
+ ** and the key value (a variable length integer, may have any value).
+ ** The first of the while(...) loops below skips over the record-length
+ ** field. The second while(...) loop copies the key value from the
+ ** cell on pPage into the pSpace buffer.
+ */
+ pCell = findCell(pPage, pPage->nCell-1);
+ pStop = &pCell[9];
+ while( (*(pCell++)&0x80) && pCell<pStop );
+ pStop = &pCell[9];
+ while( ((*(pOut++) = *(pCell++))&0x80) && pCell<pStop );
+
+ /* Insert the new divider cell into pParent. */
+ if( rc==SQLITE_OK ){
+ insertCell(pParent, pParent->nCell, pSpace, (int)(pOut-pSpace),
+ 0, pPage->pgno, &rc);
+ }
+
+ /* Set the right-child pointer of pParent to point to the new page. */
+ put4byte(&pParent->aData[pParent->hdrOffset+8], pgnoNew);
+
+ /* Release the reference to the new page. */
+ releasePage(pNew);
+ }
+
+ return rc;
+}
+#endif /* SQLITE_OMIT_QUICKBALANCE */
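+
+/* Illustrative sketch, not from the SQLite sources: the two while(...)
+** loops in balance_quick() above skip and then copy a SQLite varint,
+** which is big-endian, 7 bits per byte with the high bit set on every
+** byte except the last, and never longer than 9 bytes.  That is why a
+** divider cell is at most 4 (child page number) + 9 (key) = 13 bytes,
+** matching the aBalanceQuickSpace[13] buffer in balance() below.  The
+** helper name copyVarint is invented for this example.
+*/
+#if 0
+#include <stddef.h>
+
+/* Copy one varint (at most 9 bytes) from pIn to pOut and return the
+** number of bytes copied. */
+static size_t copyVarint(const unsigned char *pIn, unsigned char *pOut){
+  size_t n = 0;
+  do{
+    pOut[n] = pIn[n];
+    n++;
+  }while( (pIn[n-1] & 0x80)!=0 && n<9 );
+  return n;
+}
+#endif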
+
+#if 0
+/*
+** This function does not contribute anything to the operation of SQLite.
+** it is sometimes activated temporarily while debugging code responsible
+** for setting pointer-map entries.
+*/
+static int ptrmapCheckPages(MemPage **apPage, int nPage){
+ int i, j;
+ for(i=0; i<nPage; i++){
+ Pgno n;
+ u8 e;
+ MemPage *pPage = apPage[i];
+ BtShared *pBt = pPage->pBt;
+ assert( pPage->isInit );
+
+ for(j=0; j<pPage->nCell; j++){
+ CellInfo info;
+ u8 *z;
+
+ z = findCell(pPage, j);
+ pPage->xParseCell(pPage, z, &info);
+ if( info.nLocal<info.nPayload ){
+ Pgno ovfl = get4byte(&z[info.nSize-4]);
+ ptrmapGet(pBt, ovfl, &e, &n);
+ assert( n==pPage->pgno && e==PTRMAP_OVERFLOW1 );
+ }
+ if( !pPage->leaf ){
+ Pgno child = get4byte(z);
+ ptrmapGet(pBt, child, &e, &n);
+ assert( n==pPage->pgno && e==PTRMAP_BTREE );
+ }
+ }
+ if( !pPage->leaf ){
+ Pgno child = get4byte(&pPage->aData[pPage->hdrOffset+8]);
+ ptrmapGet(pBt, child, &e, &n);
+ assert( n==pPage->pgno && e==PTRMAP_BTREE );
+ }
+ }
+ return 1;
+}
+#endif
+
+/*
+** This function is used to copy the contents of the b-tree node stored
+** on page pFrom to page pTo. If page pFrom was not a leaf page, then
+** the pointer-map entries for each child page are updated so that the
+** parent page stored in the pointer map is page pTo. If pFrom contained
+** any cells with overflow page pointers, then the corresponding pointer
+** map entries are also updated so that the parent page is page pTo.
+**
+** If pFrom is currently carrying any overflow cells (entries in the
+** MemPage.apOvfl[] array), they are not copied to pTo.
+**
+** Before returning, page pTo is reinitialized using btreeInitPage().
+**
+** The performance of this function is not critical. It is only used by
+** the balance_shallower() and balance_deeper() procedures, neither of
+** which are called often under normal circumstances.
+*/
+static void copyNodeContent(MemPage *pFrom, MemPage *pTo, int *pRC){
+ if( (*pRC)==SQLITE_OK ){
+ BtShared * const pBt = pFrom->pBt;
+ u8 * const aFrom = pFrom->aData;
+ u8 * const aTo = pTo->aData;
+ int const iFromHdr = pFrom->hdrOffset;
+ int const iToHdr = ((pTo->pgno==1) ? 100 : 0);
+ int rc;
+ int iData;
+
+
+ assert( pFrom->isInit );
+ assert( pFrom->nFree>=iToHdr );
+ assert( get2byte(&aFrom[iFromHdr+5]) <= (int)pBt->usableSize );
+
+ /* Copy the b-tree node content from page pFrom to page pTo. */
+ iData = get2byte(&aFrom[iFromHdr+5]);
+ memcpy(&aTo[iData], &aFrom[iData], pBt->usableSize-iData);
+ memcpy(&aTo[iToHdr], &aFrom[iFromHdr], pFrom->cellOffset + 2*pFrom->nCell);
+
+ /* Reinitialize page pTo so that the contents of the MemPage structure
+ ** match the new data. The initialization of pTo can actually fail under
+ ** fairly obscure circumstances, even though it is a copy of initialized
+ ** page pFrom.
+ */
+ pTo->isInit = 0;
+ rc = btreeInitPage(pTo);
+ if( rc!=SQLITE_OK ){
+ *pRC = rc;
+ return;
+ }
+
+ /* If this is an auto-vacuum database, update the pointer-map entries
+ ** for any b-tree or overflow pages that pTo now contains the pointers to.
+ */
+ if( ISAUTOVACUUM ){
+ *pRC = setChildPtrmaps(pTo);
+ }
+ }
+}
+
+/*
+** This routine redistributes cells on the iParentIdx'th child of pParent
+** (hereafter "the page") and up to 2 siblings so that all pages have about the
+** same amount of free space. Usually a single sibling on either side of the
+** page is used in the balancing, though both siblings might come from one
+** side if the page is the first or last child of its parent. If the page
+** has fewer than 2 siblings (something which can only happen if the page
+** is a root page or a child of a root page) then all available siblings
+** participate in the balancing.
+**
+** The number of siblings of the page might be increased or decreased by
+** one or two in an effort to keep pages nearly full but not over full.
+**
+** Note that when this routine is called, some of the cells on the page
+** might not actually be stored in MemPage.aData[]. This can happen
+** if the page is overfull. This routine ensures that all cells allocated
+** to the page and its siblings fit into MemPage.aData[] before returning.
+**
+** In the course of balancing the page and its siblings, cells may be
+** inserted into or removed from the parent page (pParent). Doing so
+** may cause the parent page to become overfull or underfull. If this
+** happens, it is the responsibility of the caller to invoke the correct
+** balancing routine to fix this problem (see the balance() routine).
+**
+** If this routine fails for any reason, it might leave the database
+** in a corrupted state. So if this routine fails, the database should
+** be rolled back.
+**
+** The third argument to this function, aOvflSpace, is a pointer to a
+** buffer big enough to hold one page. If while inserting cells into the parent
+** page (pParent) the parent page becomes overfull, this buffer is
+** used to store the parent's overflow cells. Because this function inserts
+** a maximum of four divider cells into the parent page, and the maximum
+** size of a cell stored within an internal node is always less than 1/4
+** of the page-size, the aOvflSpace[] buffer is guaranteed to be large
+** enough for all overflow cells.
+**
+** If aOvflSpace is set to a null pointer, this function returns
+** SQLITE_NOMEM.
+*/
+static int balance_nonroot(
+ MemPage *pParent, /* Parent page of siblings being balanced */
+ int iParentIdx, /* Index of "the page" in pParent */
+ u8 *aOvflSpace, /* page-size bytes of space for parent ovfl */
+ int isRoot, /* True if pParent is a root-page */
+ int bBulk /* True if this call is part of a bulk load */
+){
+ BtShared *pBt; /* The whole database */
+ int nMaxCells = 0; /* Allocated size of apCell, szCell, aFrom. */
+ int nNew = 0; /* Number of pages in apNew[] */
+ int nOld; /* Number of pages in apOld[] */
+ int i, j, k; /* Loop counters */
+ int nxDiv; /* Next divider slot in pParent->aCell[] */
+ int rc = SQLITE_OK; /* The return code */
+ u16 leafCorrection; /* 4 if pPage is a leaf. 0 if not */
+ int leafData; /* True if pPage is a leaf of a LEAFDATA tree */
+ int usableSpace; /* Bytes in pPage beyond the header */
+ int pageFlags; /* Value of pPage->aData[0] */
+ int iSpace1 = 0; /* First unused byte of aSpace1[] */
+ int iOvflSpace = 0; /* First unused byte of aOvflSpace[] */
+ int szScratch; /* Size of scratch memory requested */
+ MemPage *apOld[NB]; /* pPage and up to two siblings */
+ MemPage *apNew[NB+2]; /* pPage and up to NB siblings after balancing */
+ u8 *pRight; /* Location in parent of right-sibling pointer */
+ u8 *apDiv[NB-1]; /* Divider cells in pParent */
+  int cntNew[NB+2];            /* Index in b.apCell[] of cell after i-th page */
+ int cntOld[NB+2]; /* Old index in b.apCell[] */
+ int szNew[NB+2]; /* Combined size of cells placed on i-th page */
+  u8 *aSpace1;                 /* Space for copies of divider cells */
+ Pgno pgno; /* Temp var to store a page number in */
+ u8 abDone[NB+2]; /* True after i'th new page is populated */
+ Pgno aPgno[NB+2]; /* Page numbers of new pages before shuffling */
+ Pgno aPgOrder[NB+2]; /* Copy of aPgno[] used for sorting pages */
+ u16 aPgFlags[NB+2]; /* flags field of new pages before shuffling */
+ CellArray b; /* Parsed information on cells being balanced */
+
+ memset(abDone, 0, sizeof(abDone));
+ b.nCell = 0;
+ b.apCell = 0;
+ pBt = pParent->pBt;
+ assert( sqlite3_mutex_held(pBt->mutex) );
+ assert( sqlite3PagerIswriteable(pParent->pDbPage) );
+
+#if 0
+ TRACE(("BALANCE: begin page %d child of %d\n", pPage->pgno, pParent->pgno));
+#endif
+
+ /* At this point pParent may have at most one overflow cell. And if
+ ** this overflow cell is present, it must be the cell with
+ ** index iParentIdx. This scenario comes about when this function
+ ** is called (indirectly) from sqlite3BtreeDelete().
+ */
+ assert( pParent->nOverflow==0 || pParent->nOverflow==1 );
+ assert( pParent->nOverflow==0 || pParent->aiOvfl[0]==iParentIdx );
+
+ if( !aOvflSpace ){
+ return SQLITE_NOMEM_BKPT;
+ }
+
+ /* Find the sibling pages to balance. Also locate the cells in pParent
+ ** that divide the siblings. An attempt is made to find NN siblings on
+ ** either side of pPage. More siblings are taken from one side, however,
+ ** if there are fewer than NN siblings on the other side. If pParent
+ ** has NB or fewer children then all children of pParent are taken.
+ **
+ ** This loop also drops the divider cells from the parent page. This
+ ** way, the remainder of the function does not have to deal with any
+ ** overflow cells in the parent page, since if any existed they will
+ ** have already been removed.
+ */
+ i = pParent->nOverflow + pParent->nCell;
+ if( i<2 ){
+ nxDiv = 0;
+ }else{
+ assert( bBulk==0 || bBulk==1 );
+ if( iParentIdx==0 ){
+ nxDiv = 0;
+ }else if( iParentIdx==i ){
+ nxDiv = i-2+bBulk;
+ }else{
+ nxDiv = iParentIdx-1;
+ }
+ i = 2-bBulk;
+ }
+ nOld = i+1;
+ if( (i+nxDiv-pParent->nOverflow)==pParent->nCell ){
+ pRight = &pParent->aData[pParent->hdrOffset+8];
+ }else{
+ pRight = findCell(pParent, i+nxDiv-pParent->nOverflow);
+ }
+ pgno = get4byte(pRight);
+ while( 1 ){
+ rc = getAndInitPage(pBt, pgno, &apOld[i], 0, 0);
+ if( rc ){
+ memset(apOld, 0, (i+1)*sizeof(MemPage*));
+ goto balance_cleanup;
+ }
+ nMaxCells += 1+apOld[i]->nCell+apOld[i]->nOverflow;
+ if( (i--)==0 ) break;
+
+ if( pParent->nOverflow && i+nxDiv==pParent->aiOvfl[0] ){
+ apDiv[i] = pParent->apOvfl[0];
+ pgno = get4byte(apDiv[i]);
+ szNew[i] = pParent->xCellSize(pParent, apDiv[i]);
+ pParent->nOverflow = 0;
+ }else{
+ apDiv[i] = findCell(pParent, i+nxDiv-pParent->nOverflow);
+ pgno = get4byte(apDiv[i]);
+ szNew[i] = pParent->xCellSize(pParent, apDiv[i]);
+
+ /* Drop the cell from the parent page. apDiv[i] still points to
+ ** the cell within the parent, even though it has been dropped.
+ ** This is safe because dropping a cell only overwrites the first
+ ** four bytes of it, and this function does not need the first
+ ** four bytes of the divider cell. So the pointer is safe to use
+ ** later on.
+ **
+ ** But not if we are in secure-delete mode. In secure-delete mode,
+ ** the dropCell() routine will overwrite the entire cell with zeroes.
+ ** In this case, temporarily copy the cell into the aOvflSpace[]
+      ** buffer. It will be copied out again as soon as the aSpace1[] buffer
+ ** is allocated. */
+ if( pBt->btsFlags & BTS_SECURE_DELETE ){
+ int iOff;
+
+ iOff = SQLITE_PTR_TO_INT(apDiv[i]) - SQLITE_PTR_TO_INT(pParent->aData);
+ if( (iOff+szNew[i])>(int)pBt->usableSize ){
+ rc = SQLITE_CORRUPT_BKPT;
+ memset(apOld, 0, (i+1)*sizeof(MemPage*));
+ goto balance_cleanup;
+ }else{
+ memcpy(&aOvflSpace[iOff], apDiv[i], szNew[i]);
+ apDiv[i] = &aOvflSpace[apDiv[i]-pParent->aData];
+ }
+ }
+ dropCell(pParent, i+nxDiv-pParent->nOverflow, szNew[i], &rc);
+ }
+ }
+
+ /* Make nMaxCells a multiple of 4 in order to preserve 8-byte
+ ** alignment */
+ nMaxCells = (nMaxCells + 3)&~3;
+
+ /*
+ ** Allocate space for memory structures
+ */
+ szScratch =
+ nMaxCells*sizeof(u8*) /* b.apCell */
+ + nMaxCells*sizeof(u16) /* b.szCell */
+ + pBt->pageSize; /* aSpace1 */
+
+ /* EVIDENCE-OF: R-28375-38319 SQLite will never request a scratch buffer
+ ** that is more than 6 times the database page size. */
+ assert( szScratch<=6*(int)pBt->pageSize );
+ b.apCell = sqlite3ScratchMalloc( szScratch );
+ if( b.apCell==0 ){
+ rc = SQLITE_NOMEM_BKPT;
+ goto balance_cleanup;
+ }
+ b.szCell = (u16*)&b.apCell[nMaxCells];
+ aSpace1 = (u8*)&b.szCell[nMaxCells];
+ assert( EIGHT_BYTE_ALIGNMENT(aSpace1) );
+
+ /*
+ ** Load pointers to all cells on sibling pages and the divider cells
+ ** into the local b.apCell[] array. Make copies of the divider cells
+ ** into space obtained from aSpace1[]. The divider cells have already
+ ** been removed from pParent.
+ **
+ ** If the siblings are on leaf pages, then the child pointers of the
+ ** divider cells are stripped from the cells before they are copied
+ ** into aSpace1[]. In this way, all cells in b.apCell[] are without
+  ** child pointers. If siblings are not leaves, then all cells in
+ ** b.apCell[] include child pointers. Either way, all cells in b.apCell[]
+ ** are alike.
+ **
+ ** leafCorrection: 4 if pPage is a leaf. 0 if pPage is not a leaf.
+ ** leafData: 1 if pPage holds key+data and pParent holds only keys.
+ */
+ b.pRef = apOld[0];
+ leafCorrection = b.pRef->leaf*4;
+ leafData = b.pRef->intKeyLeaf;
+ for(i=0; i<nOld; i++){
+ MemPage *pOld = apOld[i];
+ int limit = pOld->nCell;
+ u8 *aData = pOld->aData;
+ u16 maskPage = pOld->maskPage;
+ u8 *piCell = aData + pOld->cellOffset;
+ u8 *piEnd;
+
+ /* Verify that all sibling pages are of the same "type" (table-leaf,
+ ** table-interior, index-leaf, or index-interior).
+ */
+ if( pOld->aData[0]!=apOld[0]->aData[0] ){
+ rc = SQLITE_CORRUPT_BKPT;
+ goto balance_cleanup;
+ }
+
+ /* Load b.apCell[] with pointers to all cells in pOld. If pOld
+    ** contains overflow cells, include them in the b.apCell[] array
+ ** in the correct spot.
+ **
+ ** Note that when there are multiple overflow cells, it is always the
+ ** case that they are sequential and adjacent. This invariant arises
+    ** because multiple overflows can only occur when inserting divider
+ ** cells into a parent on a prior balance, and divider cells are always
+ ** adjacent and are inserted in order. There is an assert() tagged
+ ** with "NOTE 1" in the overflow cell insertion loop to prove this
+ ** invariant.
+ **
+ ** This must be done in advance. Once the balance starts, the cell
+ ** offset section of the btree page will be overwritten and we will no
+    ** longer be able to find the cells if a pointer to each cell is not saved
+ ** first.
+ */
+ memset(&b.szCell[b.nCell], 0, sizeof(b.szCell[0])*(limit+pOld->nOverflow));
+ if( pOld->nOverflow>0 ){
+ limit = pOld->aiOvfl[0];
+ for(j=0; j<limit; j++){
+ b.apCell[b.nCell] = aData + (maskPage & get2byteAligned(piCell));
+ piCell += 2;
+ b.nCell++;
+ }
+ for(k=0; k<pOld->nOverflow; k++){
+ assert( k==0 || pOld->aiOvfl[k-1]+1==pOld->aiOvfl[k] );/* NOTE 1 */
+ b.apCell[b.nCell] = pOld->apOvfl[k];
+ b.nCell++;
+ }
+ }
+ piEnd = aData + pOld->cellOffset + 2*pOld->nCell;
+ while( piCell<piEnd ){
+ assert( b.nCell<nMaxCells );
+ b.apCell[b.nCell] = aData + (maskPage & get2byteAligned(piCell));
+ piCell += 2;
+ b.nCell++;
+ }
+
+ cntOld[i] = b.nCell;
+ if( i<nOld-1 && !leafData){
+ u16 sz = (u16)szNew[i];
+ u8 *pTemp;
+ assert( b.nCell<nMaxCells );
+ b.szCell[b.nCell] = sz;
+ pTemp = &aSpace1[iSpace1];
+ iSpace1 += sz;
+ assert( sz<=pBt->maxLocal+23 );
+ assert( iSpace1 <= (int)pBt->pageSize );
+ memcpy(pTemp, apDiv[i], sz);
+ b.apCell[b.nCell] = pTemp+leafCorrection;
+ assert( leafCorrection==0 || leafCorrection==4 );
+ b.szCell[b.nCell] = b.szCell[b.nCell] - leafCorrection;
+ if( !pOld->leaf ){
+ assert( leafCorrection==0 );
+ assert( pOld->hdrOffset==0 );
+ /* The right pointer of the child page pOld becomes the left
+ ** pointer of the divider cell */
+ memcpy(b.apCell[b.nCell], &pOld->aData[8], 4);
+ }else{
+ assert( leafCorrection==4 );
+ while( b.szCell[b.nCell]<4 ){
+ /* Do not allow any cells smaller than 4 bytes. If a smaller cell
+ ** does exist, pad it with 0x00 bytes. */
+ assert( b.szCell[b.nCell]==3 || CORRUPT_DB );
+ assert( b.apCell[b.nCell]==&aSpace1[iSpace1-3] || CORRUPT_DB );
+ aSpace1[iSpace1++] = 0x00;
+ b.szCell[b.nCell]++;
+ }
+ }
+ b.nCell++;
+ }
+ }
+
+ /*
+ ** Figure out the number of pages needed to hold all b.nCell cells.
+ ** Store this number in "k". Also compute szNew[] which is the total
+ ** size of all cells on the i-th page and cntNew[] which is the index
+ ** in b.apCell[] of the cell that divides page i from page i+1.
+ ** cntNew[k] should equal b.nCell.
+ **
+ ** Values computed by this block:
+ **
+ ** k: The total number of sibling pages
+  **   szNew[i]: Space used on the i-th sibling page.
+ ** cntNew[i]: Index in b.apCell[] and b.szCell[] for the first cell to
+ ** the right of the i-th sibling page.
+ ** usableSpace: Number of bytes of space available on each sibling.
+ **
+ */
+ usableSpace = pBt->usableSize - 12 + leafCorrection;
+ for(i=0; i<nOld; i++){
+ MemPage *p = apOld[i];
+ szNew[i] = usableSpace - p->nFree;
+ for(j=0; j<p->nOverflow; j++){
+ szNew[i] += 2 + p->xCellSize(p, p->apOvfl[j]);
+ }
+ cntNew[i] = cntOld[i];
+ }
+ k = nOld;
+ for(i=0; i<k; i++){
+ int sz;
+ while( szNew[i]>usableSpace ){
+ if( i+1>=k ){
+ k = i+2;
+ if( k>NB+2 ){ rc = SQLITE_CORRUPT_BKPT; goto balance_cleanup; }
+ szNew[k-1] = 0;
+ cntNew[k-1] = b.nCell;
+ }
+ sz = 2 + cachedCellSize(&b, cntNew[i]-1);
+ szNew[i] -= sz;
+ if( !leafData ){
+ if( cntNew[i]<b.nCell ){
+ sz = 2 + cachedCellSize(&b, cntNew[i]);
+ }else{
+ sz = 0;
+ }
+ }
+ szNew[i+1] += sz;
+ cntNew[i]--;
+ }
+ while( cntNew[i]<b.nCell ){
+ sz = 2 + cachedCellSize(&b, cntNew[i]);
+ if( szNew[i]+sz>usableSpace ) break;
+ szNew[i] += sz;
+ cntNew[i]++;
+ if( !leafData ){
+ if( cntNew[i]<b.nCell ){
+ sz = 2 + cachedCellSize(&b, cntNew[i]);
+ }else{
+ sz = 0;
+ }
+ }
+ szNew[i+1] -= sz;
+ }
+ if( cntNew[i]>=b.nCell ){
+ k = i+1;
+ }else if( cntNew[i] <= (i>0 ? cntNew[i-1] : 0) ){
+ rc = SQLITE_CORRUPT_BKPT;
+ goto balance_cleanup;
+ }
+ }
+
+ /*
+ ** The packing computed by the previous block is biased toward the siblings
+ ** on the left side (siblings with smaller keys). The left siblings are
+ ** always nearly full, while the right-most sibling might be nearly empty.
+ ** The next block of code attempts to adjust the packing of siblings to
+ ** get a better balance.
+ **
+ ** This adjustment is more than an optimization. The packing above might
+ ** be so out of balance as to be illegal. For example, the right-most
+ ** sibling might be completely empty. This adjustment is not optional.
+ */
+ for(i=k-1; i>0; i--){
+ int szRight = szNew[i]; /* Size of sibling on the right */
+ int szLeft = szNew[i-1]; /* Size of sibling on the left */
+ int r; /* Index of right-most cell in left sibling */
+ int d; /* Index of first cell to the left of right sibling */
+
+ r = cntNew[i-1] - 1;
+ d = r + 1 - leafData;
+ (void)cachedCellSize(&b, d);
+ do{
+ assert( d<nMaxCells );
+ assert( r<nMaxCells );
+ (void)cachedCellSize(&b, r);
+ if( szRight!=0
+ && (bBulk || szRight+b.szCell[d]+2 > szLeft-(b.szCell[r]+(i==k-1?0:2)))){
+ break;
+ }
+ szRight += b.szCell[d] + 2;
+ szLeft -= b.szCell[r] + 2;
+ cntNew[i-1] = r;
+ r--;
+ d--;
+ }while( r>=0 );
+ szNew[i] = szRight;
+ szNew[i-1] = szLeft;
+ if( cntNew[i-1] <= (i>1 ? cntNew[i-2] : 0) ){
+ rc = SQLITE_CORRUPT_BKPT;
+ goto balance_cleanup;
+ }
+ }
+
+  /* Sanity check:  For a non-corrupt database file one of the following
+ ** must be true:
+  **   (1) We found one or more cells (cntNew[0]>0), or
+ ** (2) pPage is a virtual root page. A virtual root page is when
+ ** the real root page is page 1 and we are the only child of
+ ** that page.
+ */
+ assert( cntNew[0]>0 || (pParent->pgno==1 && pParent->nCell==0) || CORRUPT_DB);
+ TRACE(("BALANCE: old: %d(nc=%d) %d(nc=%d) %d(nc=%d)\n",
+ apOld[0]->pgno, apOld[0]->nCell,
+ nOld>=2 ? apOld[1]->pgno : 0, nOld>=2 ? apOld[1]->nCell : 0,
+ nOld>=3 ? apOld[2]->pgno : 0, nOld>=3 ? apOld[2]->nCell : 0
+ ));
+
+ /*
+ ** Allocate k new pages. Reuse old pages where possible.
+ */
+ pageFlags = apOld[0]->aData[0];
+ for(i=0; i<k; i++){
+ MemPage *pNew;
+ if( i<nOld ){
+ pNew = apNew[i] = apOld[i];
+ apOld[i] = 0;
+ rc = sqlite3PagerWrite(pNew->pDbPage);
+ nNew++;
+ if( rc ) goto balance_cleanup;
+ }else{
+ assert( i>0 );
+ rc = allocateBtreePage(pBt, &pNew, &pgno, (bBulk ? 1 : pgno), 0);
+ if( rc ) goto balance_cleanup;
+ zeroPage(pNew, pageFlags);
+ apNew[i] = pNew;
+ nNew++;
+ cntOld[i] = b.nCell;
+
+ /* Set the pointer-map entry for the new sibling page. */
+ if( ISAUTOVACUUM ){
+ ptrmapPut(pBt, pNew->pgno, PTRMAP_BTREE, pParent->pgno, &rc);
+ if( rc!=SQLITE_OK ){
+ goto balance_cleanup;
+ }
+ }
+ }
+ }
+
+ /*
+ ** Reassign page numbers so that the new pages are in ascending order.
+ ** This helps to keep entries in the disk file in order so that a scan
+ ** of the table is closer to a linear scan through the file. That in turn
+ ** helps the operating system to deliver pages from the disk more rapidly.
+ **
+ ** An O(n^2) insertion sort algorithm is used, but since n is never more
+ ** than (NB+2) (a small constant), that should not be a problem.
+ **
+ ** When NB==3, this one optimization makes the database about 25% faster
+ ** for large insertions and deletions.
+ */
+ for(i=0; i<nNew; i++){
+ aPgOrder[i] = aPgno[i] = apNew[i]->pgno;
+ aPgFlags[i] = apNew[i]->pDbPage->flags;
+ for(j=0; j<i; j++){
+ if( aPgno[j]==aPgno[i] ){
+ /* This branch is taken if the set of sibling pages somehow contains
+ ** duplicate entries. This can happen if the database is corrupt.
+ ** It would be simpler to detect this as part of the loop below, but
+ ** we do the detection here in order to avoid populating the pager
+ ** cache with two separate objects associated with the same
+ ** page number. */
+ assert( CORRUPT_DB );
+ rc = SQLITE_CORRUPT_BKPT;
+ goto balance_cleanup;
+ }
+ }
+ }
+ for(i=0; i<nNew; i++){
+ int iBest = 0; /* aPgno[] index of page number to use */
+ for(j=1; j<nNew; j++){
+ if( aPgOrder[j]<aPgOrder[iBest] ) iBest = j;
+ }
+ pgno = aPgOrder[iBest];
+ aPgOrder[iBest] = 0xffffffff;
+ if( iBest!=i ){
+ if( iBest>i ){
+ sqlite3PagerRekey(apNew[iBest]->pDbPage, pBt->nPage+iBest+1, 0);
+ }
+ sqlite3PagerRekey(apNew[i]->pDbPage, pgno, aPgFlags[iBest]);
+ apNew[i]->pgno = pgno;
+ }
+ }
+
+ TRACE(("BALANCE: new: %d(%d nc=%d) %d(%d nc=%d) %d(%d nc=%d) "
+ "%d(%d nc=%d) %d(%d nc=%d)\n",
+ apNew[0]->pgno, szNew[0], cntNew[0],
+ nNew>=2 ? apNew[1]->pgno : 0, nNew>=2 ? szNew[1] : 0,
+ nNew>=2 ? cntNew[1] - cntNew[0] - !leafData : 0,
+ nNew>=3 ? apNew[2]->pgno : 0, nNew>=3 ? szNew[2] : 0,
+ nNew>=3 ? cntNew[2] - cntNew[1] - !leafData : 0,
+ nNew>=4 ? apNew[3]->pgno : 0, nNew>=4 ? szNew[3] : 0,
+ nNew>=4 ? cntNew[3] - cntNew[2] - !leafData : 0,
+ nNew>=5 ? apNew[4]->pgno : 0, nNew>=5 ? szNew[4] : 0,
+ nNew>=5 ? cntNew[4] - cntNew[3] - !leafData : 0
+ ));
+
+ assert( sqlite3PagerIswriteable(pParent->pDbPage) );
+ put4byte(pRight, apNew[nNew-1]->pgno);
+
+ /* If the sibling pages are not leaves, ensure that the right-child pointer
+ ** of the right-most new sibling page is set to the value that was
+ ** originally in the same field of the right-most old sibling page. */
+ if( (pageFlags & PTF_LEAF)==0 && nOld!=nNew ){
+ MemPage *pOld = (nNew>nOld ? apNew : apOld)[nOld-1];
+ memcpy(&apNew[nNew-1]->aData[8], &pOld->aData[8], 4);
+ }
+
+ /* Make any required updates to pointer map entries associated with
+ ** cells stored on sibling pages following the balance operation. Pointer
+ ** map entries associated with divider cells are set by the insertCell()
+ ** routine. The associated pointer map entries are:
+ **
+ ** a) if the cell contains a reference to an overflow chain, the
+ ** entry associated with the first page in the overflow chain, and
+ **
+ ** b) if the sibling pages are not leaves, the child page associated
+ ** with the cell.
+ **
+ ** If the sibling pages are not leaves, then the pointer map entry
+ ** associated with the right-child of each sibling may also need to be
+ ** updated. This happens below, after the sibling pages have been
+ ** populated, not here.
+ */
+ if( ISAUTOVACUUM ){
+ MemPage *pNew = apNew[0];
+ u8 *aOld = pNew->aData;
+ int cntOldNext = pNew->nCell + pNew->nOverflow;
+ int usableSize = pBt->usableSize;
+ int iNew = 0;
+ int iOld = 0;
+
+ for(i=0; i<b.nCell; i++){
+ u8 *pCell = b.apCell[i];
+ if( i==cntOldNext ){
+ MemPage *pOld = (++iOld)<nNew ? apNew[iOld] : apOld[iOld];
+ cntOldNext += pOld->nCell + pOld->nOverflow + !leafData;
+ aOld = pOld->aData;
+ }
+ if( i==cntNew[iNew] ){
+ pNew = apNew[++iNew];
+ if( !leafData ) continue;
+ }
+
+ /* Cell pCell is destined for new sibling page pNew. Originally, it
+ ** was either part of sibling page iOld (possibly an overflow cell),
+ ** or else the divider cell to the left of sibling page iOld. So,
+ ** if sibling page iOld had the same page number as pNew, and if
+ ** pCell really was a part of sibling page iOld (not a divider or
+ ** overflow cell), we can skip updating the pointer map entries. */
+ if( iOld>=nNew
+ || pNew->pgno!=aPgno[iOld]
+ || !SQLITE_WITHIN(pCell,aOld,&aOld[usableSize])
+ ){
+ if( !leafCorrection ){
+ ptrmapPut(pBt, get4byte(pCell), PTRMAP_BTREE, pNew->pgno, &rc);
+ }
+ if( cachedCellSize(&b,i)>pNew->minLocal ){
+ ptrmapPutOvflPtr(pNew, pCell, &rc);
+ }
+ if( rc ) goto balance_cleanup;
+ }
+ }
+ }
+
+ /* Insert new divider cells into pParent. */
+ for(i=0; i<nNew-1; i++){
+ u8 *pCell;
+ u8 *pTemp;
+ int sz;
+ MemPage *pNew = apNew[i];
+ j = cntNew[i];
+
+ assert( j<nMaxCells );
+ assert( b.apCell[j]!=0 );
+ pCell = b.apCell[j];
+ sz = b.szCell[j] + leafCorrection;
+ pTemp = &aOvflSpace[iOvflSpace];
+ if( !pNew->leaf ){
+ memcpy(&pNew->aData[8], pCell, 4);
+ }else if( leafData ){
+ /* If the tree is a leaf-data tree, and the siblings are leaves,
+ ** then there is no divider cell in b.apCell[]. Instead, the divider
+ ** cell consists of the integer key for the right-most cell of
+ ** the sibling-page assembled above only.
+ */
+ CellInfo info;
+ j--;
+ pNew->xParseCell(pNew, b.apCell[j], &info);
+ pCell = pTemp;
+ sz = 4 + putVarint(&pCell[4], info.nKey);
+ pTemp = 0;
+ }else{
+ pCell -= 4;
+ /* Obscure case for non-leaf-data trees: If the cell at pCell was
+ ** previously stored on a leaf node, and its reported size was 4
+ ** bytes, then it may actually be smaller than this
+ ** (see btreeParseCellPtr(), 4 bytes is the minimum size of
+ ** any cell). But it is important to pass the correct size to
+ ** insertCell(), so reparse the cell now.
+ **
+ ** This can only happen for b-trees used to evaluate "IN (SELECT ...)"
+ ** and WITHOUT ROWID tables with exactly one column which is the
+ ** primary key.
+ */
+ if( b.szCell[j]==4 ){
+ assert(leafCorrection==4);
+ sz = pParent->xCellSize(pParent, pCell);
+ }
+ }
+ iOvflSpace += sz;
+ assert( sz<=pBt->maxLocal+23 );
+ assert( iOvflSpace <= (int)pBt->pageSize );
+ insertCell(pParent, nxDiv+i, pCell, sz, pTemp, pNew->pgno, &rc);
+ if( rc!=SQLITE_OK ) goto balance_cleanup;
+ assert( sqlite3PagerIswriteable(pParent->pDbPage) );
+ }
+
+ /* Now update the actual sibling pages. The order in which they are updated
+ ** is important, as this code needs to avoid disrupting any page from which
+  ** cells may still need to be read. In practice, this means:
+ **
+ ** (1) If cells are moving left (from apNew[iPg] to apNew[iPg-1])
+ ** then it is not safe to update page apNew[iPg] until after
+ ** the left-hand sibling apNew[iPg-1] has been updated.
+ **
+ ** (2) If cells are moving right (from apNew[iPg] to apNew[iPg+1])
+ ** then it is not safe to update page apNew[iPg] until after
+ ** the right-hand sibling apNew[iPg+1] has been updated.
+ **
+ ** If neither of the above apply, the page is safe to update.
+ **
+  ** The iPg value in the following loop starts at nNew-1 and goes down
+ ** to 0, then back up to nNew-1 again, thus making two passes over
+ ** the pages. On the initial downward pass, only condition (1) above
+ ** needs to be tested because (2) will always be true from the previous
+ ** step. On the upward pass, both conditions are always true, so the
+ ** upwards pass simply processes pages that were missed on the downward
+ ** pass.
+ */
+ for(i=1-nNew; i<nNew; i++){
+ int iPg = i<0 ? -i : i;
+ assert( iPg>=0 && iPg<nNew );
+ if( abDone[iPg] ) continue; /* Skip pages already processed */
+ if( i>=0 /* On the upwards pass, or... */
+ || cntOld[iPg-1]>=cntNew[iPg-1] /* Condition (1) is true */
+ ){
+ int iNew;
+ int iOld;
+ int nNewCell;
+
+ /* Verify condition (1): If cells are moving left, update iPg
+ ** only after iPg-1 has already been updated. */
+ assert( iPg==0 || cntOld[iPg-1]>=cntNew[iPg-1] || abDone[iPg-1] );
+
+ /* Verify condition (2): If cells are moving right, update iPg
+ ** only after iPg+1 has already been updated. */
+ assert( cntNew[iPg]>=cntOld[iPg] || abDone[iPg+1] );
+
+ if( iPg==0 ){
+ iNew = iOld = 0;
+ nNewCell = cntNew[0];
+ }else{
+ iOld = iPg<nOld ? (cntOld[iPg-1] + !leafData) : b.nCell;
+ iNew = cntNew[iPg-1] + !leafData;
+ nNewCell = cntNew[iPg] - iNew;
+ }
+
+ rc = editPage(apNew[iPg], iOld, iNew, nNewCell, &b);
+ if( rc ) goto balance_cleanup;
+ abDone[iPg]++;
+ apNew[iPg]->nFree = usableSpace-szNew[iPg];
+ assert( apNew[iPg]->nOverflow==0 );
+ assert( apNew[iPg]->nCell==nNewCell );
+ }
+ }
+
+ /* All pages have been processed exactly once */
+ assert( memcmp(abDone, "\01\01\01\01\01", nNew)==0 );
+
+ assert( nOld>0 );
+ assert( nNew>0 );
+
+ if( isRoot && pParent->nCell==0 && pParent->hdrOffset<=apNew[0]->nFree ){
+ /* The root page of the b-tree now contains no cells. The only sibling
+ ** page is the right-child of the parent. Copy the contents of the
+ ** child page into the parent, decreasing the overall height of the
+ ** b-tree structure by one. This is described as the "balance-shallower"
+ ** sub-algorithm in some documentation.
+ **
+ ** If this is an auto-vacuum database, the call to copyNodeContent()
+ ** sets all pointer-map entries corresponding to database image pages
+ ** for which the pointer is stored within the content being copied.
+ **
+ ** It is critical that the child page be defragmented before being
+ ** copied into the parent, because if the parent is page 1 then it will
+    ** be smaller than the child due to the database header, and so all the
+ ** free space needs to be up front.
+ */
+ assert( nNew==1 || CORRUPT_DB );
+ rc = defragmentPage(apNew[0]);
+ testcase( rc!=SQLITE_OK );
+ assert( apNew[0]->nFree ==
+ (get2byte(&apNew[0]->aData[5])-apNew[0]->cellOffset-apNew[0]->nCell*2)
+ || rc!=SQLITE_OK
+ );
+ copyNodeContent(apNew[0], pParent, &rc);
+ freePage(apNew[0], &rc);
+ }else if( ISAUTOVACUUM && !leafCorrection ){
+ /* Fix the pointer map entries associated with the right-child of each
+ ** sibling page. All other pointer map entries have already been taken
+ ** care of. */
+ for(i=0; i<nNew; i++){
+ u32 key = get4byte(&apNew[i]->aData[8]);
+ ptrmapPut(pBt, key, PTRMAP_BTREE, apNew[i]->pgno, &rc);
+ }
+ }
+
+ assert( pParent->isInit );
+ TRACE(("BALANCE: finished: old=%d new=%d cells=%d\n",
+ nOld, nNew, b.nCell));
+
+ /* Free any old pages that were not reused as new pages.
+ */
+ for(i=nNew; i<nOld; i++){
+ freePage(apOld[i], &rc);
+ }
+
+#if 0
+ if( ISAUTOVACUUM && rc==SQLITE_OK && apNew[0]->isInit ){
+ /* The ptrmapCheckPages() contains assert() statements that verify that
+ ** all pointer map pages are set correctly. This is helpful while
+ ** debugging. This is usually disabled because a corrupt database may
+ ** cause an assert() statement to fail. */
+ ptrmapCheckPages(apNew, nNew);
+ ptrmapCheckPages(&pParent, 1);
+ }
+#endif
+
+ /*
+ ** Cleanup before returning.
+ */
+balance_cleanup:
+ sqlite3ScratchFree(b.apCell);
+ for(i=0; i<nOld; i++){
+ releasePage(apOld[i]);
+ }
+ for(i=0; i<nNew; i++){
+ releasePage(apNew[i]);
+ }
+
+ return rc;
+}
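+
+/* Illustrative sketch, not from the SQLite sources: a highly simplified
+** model of the two-phase packing performed by balance_nonroot() above.
+** It ignores divider cells, the 2-byte cell-pointer overhead and the
+** forced move that prevents an empty right-most page, and the names
+** toyPack, aSz and usable are invented for this example.  Phase 1 packs
+** cells greedily from the left; phase 2 then shifts trailing cells from
+** each left sibling to its right neighbor while doing so evens out the
+** space used, which is what keeps the right-most page from ending up
+** nearly empty.
+*/
+#if 0
+#include <stdio.h>
+
+static void toyPack(const int *aSz, int nCell, int usable){
+  int cnt[16];   /* cnt[i]  = index of the first cell to the right of page i */
+  int used[16];  /* used[i] = bytes consumed on page i */
+  int i, k = 0, c = 0;
+
+  /* Phase 1: greedy, left-biased packing (at least one cell per page) */
+  while( c<nCell && k<16 ){
+    used[k] = 0;
+    do{
+      used[k] += aSz[c++];
+    }while( c<nCell && used[k]+aSz[c]<=usable );
+    cnt[k++] = c;
+  }
+
+  /* Phase 2: move cells rightward while doing so improves the balance */
+  for(i=k-1; i>0; i--){
+    while( cnt[i-1] > (i>1 ? cnt[i-2] : 0)
+        && used[i]+aSz[cnt[i-1]-1] <= used[i-1]-aSz[cnt[i-1]-1]
+    ){
+      int sz = aSz[--cnt[i-1]];
+      used[i-1] -= sz;
+      used[i]   += sz;
+    }
+  }
+
+  for(i=0; i<k; i++) printf("page %d: %d bytes\n", i, used[i]);
+}
+
+int main(void){
+  static const int aSz[] = { 300, 300, 300, 300, 300, 300, 100 };
+  /* Greedy packing gives 900/900/100; phase 2 rebalances to 900/600/400. */
+  toyPack(aSz, 7, 950);
+  return 0;
+}
+#endif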
+
+
+/*
+** This function is called when the root page of a b-tree structure is
+** overfull (has one or more overflow cells).
+**
+** A new child page is allocated and the contents of the current root
+** page, including overflow cells, are copied into the child. The root
+** page is then overwritten to make it an empty page with the right-child
+** pointer pointing to the new page.
+**
+** Before returning, all pointer-map entries corresponding to pages
+** that the new child-page now contains pointers to are updated. The
+** entry corresponding to the new right-child pointer of the root
+** page is also updated.
+**
+** If successful, *ppChild is set to contain a reference to the child
+** page and SQLITE_OK is returned. In this case the caller is required
+** to call releasePage() on *ppChild exactly once. If an error occurs,
+** an error code is returned and *ppChild is set to 0.
+*/
+static int balance_deeper(MemPage *pRoot, MemPage **ppChild){
+ int rc; /* Return value from subprocedures */
+ MemPage *pChild = 0; /* Pointer to a new child page */
+ Pgno pgnoChild = 0; /* Page number of the new child page */
+ BtShared *pBt = pRoot->pBt; /* The BTree */
+
+ assert( pRoot->nOverflow>0 );
+ assert( sqlite3_mutex_held(pBt->mutex) );
+
+ /* Make pRoot, the root page of the b-tree, writable. Allocate a new
+  ** page that will become the new right-child of pRoot. Copy the contents
+ ** of the node stored on pRoot into the new child page.
+ */
+ rc = sqlite3PagerWrite(pRoot->pDbPage);
+ if( rc==SQLITE_OK ){
+ rc = allocateBtreePage(pBt,&pChild,&pgnoChild,pRoot->pgno,0);
+ copyNodeContent(pRoot, pChild, &rc);
+ if( ISAUTOVACUUM ){
+ ptrmapPut(pBt, pgnoChild, PTRMAP_BTREE, pRoot->pgno, &rc);
+ }
+ }
+ if( rc ){
+ *ppChild = 0;
+ releasePage(pChild);
+ return rc;
+ }
+ assert( sqlite3PagerIswriteable(pChild->pDbPage) );
+ assert( sqlite3PagerIswriteable(pRoot->pDbPage) );
+ assert( pChild->nCell==pRoot->nCell );
+
+ TRACE(("BALANCE: copy root %d into %d\n", pRoot->pgno, pChild->pgno));
+
+ /* Copy the overflow cells from pRoot to pChild */
+ memcpy(pChild->aiOvfl, pRoot->aiOvfl,
+ pRoot->nOverflow*sizeof(pRoot->aiOvfl[0]));
+ memcpy(pChild->apOvfl, pRoot->apOvfl,
+ pRoot->nOverflow*sizeof(pRoot->apOvfl[0]));
+ pChild->nOverflow = pRoot->nOverflow;
+
+ /* Zero the contents of pRoot. Then install pChild as the right-child. */
+ zeroPage(pRoot, pChild->aData[0] & ~PTF_LEAF);
+ put4byte(&pRoot->aData[pRoot->hdrOffset+8], pgnoChild);
+
+ *ppChild = pChild;
+ return SQLITE_OK;
+}
+
+/*
+** The page that pCur currently points to has just been modified in
+** some way. This function figures out if this modification means the
+** tree needs to be balanced, and if so calls the appropriate balancing
+** routine. Balancing routines are:
+**
+** balance_quick()
+** balance_deeper()
+** balance_nonroot()
+*/
+static int balance(BtCursor *pCur){
+ int rc = SQLITE_OK;
+ const int nMin = pCur->pBt->usableSize * 2 / 3;
+ u8 aBalanceQuickSpace[13];
+ u8 *pFree = 0;
+
+ VVA_ONLY( int balance_quick_called = 0 );
+ VVA_ONLY( int balance_deeper_called = 0 );
+
+ do {
+ int iPage = pCur->iPage;
+ MemPage *pPage = pCur->apPage[iPage];
+
+ if( iPage==0 ){
+ if( pPage->nOverflow ){
+ /* The root page of the b-tree is overfull. In this case call the
+ ** balance_deeper() function to create a new child for the root-page
+ ** and copy the current contents of the root-page to it. The
+ ** next iteration of the do-loop will balance the child page.
+ */
+ assert( balance_deeper_called==0 );
+ VVA_ONLY( balance_deeper_called++ );
+ rc = balance_deeper(pPage, &pCur->apPage[1]);
+ if( rc==SQLITE_OK ){
+ pCur->iPage = 1;
+ pCur->aiIdx[0] = 0;
+ pCur->aiIdx[1] = 0;
+ assert( pCur->apPage[1]->nOverflow );
+ }
+ }else{
+ break;
+ }
+ }else if( pPage->nOverflow==0 && pPage->nFree<=nMin ){
+ break;
+ }else{
+ MemPage * const pParent = pCur->apPage[iPage-1];
+ int const iIdx = pCur->aiIdx[iPage-1];
+
+ rc = sqlite3PagerWrite(pParent->pDbPage);
+ if( rc==SQLITE_OK ){
+#ifndef SQLITE_OMIT_QUICKBALANCE
+ if( pPage->intKeyLeaf
+ && pPage->nOverflow==1
+ && pPage->aiOvfl[0]==pPage->nCell
+ && pParent->pgno!=1
+ && pParent->nCell==iIdx
+ ){
+ /* Call balance_quick() to create a new sibling of pPage on which
+ ** to store the overflow cell. balance_quick() inserts a new cell
+          ** into pParent, which may cause pParent to overflow. If this
+          ** happens, the next iteration of the do-loop will balance pParent
+          ** using either balance_nonroot() or balance_deeper(). Until this
+ ** happens, the overflow cell is stored in the aBalanceQuickSpace[]
+ ** buffer.
+ **
+ ** The purpose of the following assert() is to check that only a
+ ** single call to balance_quick() is made for each call to this
+ ** function. If this were not verified, a subtle bug involving reuse
+          ** of the aBalanceQuickSpace[] buffer might sneak in.
+ */
+ assert( balance_quick_called==0 );
+ VVA_ONLY( balance_quick_called++ );
+ rc = balance_quick(pParent, pPage, aBalanceQuickSpace);
+ }else
+#endif
+ {
+ /* In this case, call balance_nonroot() to redistribute cells
+ ** between pPage and up to 2 of its sibling pages. This involves
+ ** modifying the contents of pParent, which may cause pParent to
+ ** become overfull or underfull. The next iteration of the do-loop
+ ** will balance the parent page to correct this.
+ **
+ ** If the parent page becomes overfull, the overflow cell or cells
+ ** are stored in the pSpace buffer allocated immediately below.
+ ** A subsequent iteration of the do-loop will deal with this by
+ ** calling balance_nonroot() (balance_deeper() may be called first,
+ ** but it doesn't deal with overflow cells - just moves them to a
+ ** different page). Once this subsequent call to balance_nonroot()
+ ** has completed, it is safe to release the pSpace buffer used by
+ ** the previous call, as the overflow cell data will have been
+ ** copied either into the body of a database page or into the new
+ ** pSpace buffer passed to the latter call to balance_nonroot().
+ */
+ u8 *pSpace = sqlite3PageMalloc(pCur->pBt->pageSize);
+ rc = balance_nonroot(pParent, iIdx, pSpace, iPage==1,
+ pCur->hints&BTREE_BULKLOAD);
+ if( pFree ){
+ /* If pFree is not NULL, it points to the pSpace buffer used
+ ** by a previous call to balance_nonroot(). Its contents are
+ ** now stored either on real database pages or within the
+ ** new pSpace buffer, so it may be safely freed here. */
+ sqlite3PageFree(pFree);
+ }
+
+ /* The pSpace buffer will be freed after the next call to
+ ** balance_nonroot(), or just before this function returns, whichever
+ ** comes first. */
+ pFree = pSpace;
+ }
+ }
+
+ pPage->nOverflow = 0;
+
+ /* The next iteration of the do-loop balances the parent page. */
+ releasePage(pPage);
+ pCur->iPage--;
+ assert( pCur->iPage>=0 );
+ }
+ }while( rc==SQLITE_OK );
+
+ if( pFree ){
+ sqlite3PageFree(pFree);
+ }
+ return rc;
+}
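+
+/* Illustrative sketch, not from the SQLite sources: a simplified
+** restatement of how the do-loop in balance() above chooses a balancing
+** routine for the page the cursor currently points to.  The enum and the
+** function name are invented for this example; the real loop also pops
+** the cursor up one level after each step so that the parent page is
+** balanced on the next pass.
+*/
+#if 0
+typedef enum { DO_NOTHING, DO_DEEPER, DO_QUICK, DO_NONROOT } BalanceAction;
+
+static BalanceAction chooseBalance(
+  int isRootPage,        /* True if the cursor is on the root page */
+  int nOverflow,         /* Number of overflow cells on the page */
+  int nFree,             /* Free bytes on the page */
+  int nMin,              /* usableSize*2/3, as computed in balance() */
+  int isRightmostAppend  /* True for the balance_quick() special case */
+){
+  if( isRootPage ) return nOverflow ? DO_DEEPER : DO_NOTHING;
+  if( nOverflow==0 && nFree<=nMin ) return DO_NOTHING;
+  return isRightmostAppend ? DO_QUICK : DO_NONROOT;
+}
+#endif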
+
+
+/*
+** Insert a new record into the BTree. The content of the new record
+** is described by the pX object. The pCur cursor is used only to
+** define what table the record should be inserted into, and is left
+** pointing at an arbitrary location.
+**
+** For a table btree (used for rowid tables), only the pX.nKey value of
+** the key is used. The pX.pKey value must be NULL. The pX.nKey is the
+** rowid or INTEGER PRIMARY KEY of the row. The pX.nData,pData,nZero fields
+** hold the content of the row.
+**
+** For an index btree (used for indexes and WITHOUT ROWID tables), the
+** key is an arbitrary byte sequence stored in pX.pKey,nKey. The
+** pX.pData,nData,nZero fields must be zero.
+**
+** If the seekResult parameter is non-zero, then a successful call to
+** MovetoUnpacked() to seek cursor pCur to (pKey,nKey) has already
+** been performed. In other words, if seekResult!=0 then the cursor
+** is currently pointing to a cell that will be adjacent to the cell
+** to be inserted. If seekResult<0 then pCur points to a cell that is
+** smaller than (pKey,nKey). If seekResult>0 then pCur points to a cell
+** that is larger than (pKey,nKey).
+**
+** If seekResult==0, that means pCur is pointing at some unknown location.
+** In that case, this routine must seek the cursor to the correct insertion
+** point for (pKey,nKey) before doing the insertion. For index btrees,
+** if pX->nMem is non-zero, then pX->aMem contains pointers to the unpacked
+** key values and pX->aMem can be used instead of pX->pKey to avoid having
+** to decode the key.
+*/
+SQLITE_PRIVATE int sqlite3BtreeInsert(
+ BtCursor *pCur, /* Insert data into the table of this cursor */
+ const BtreePayload *pX, /* Content of the row to be inserted */
+ int flags, /* True if this is likely an append */
+ int seekResult /* Result of prior MovetoUnpacked() call */
+){
+ int rc;
+ int loc = seekResult; /* -1: before desired location +1: after */
+ int szNew = 0;
+ int idx;
+ MemPage *pPage;
+ Btree *p = pCur->pBtree;
+ BtShared *pBt = p->pBt;
+ unsigned char *oldCell;
+ unsigned char *newCell = 0;
+
+ assert( (flags & (BTREE_SAVEPOSITION|BTREE_APPEND))==flags );
+
+ if( pCur->eState==CURSOR_FAULT ){
+ assert( pCur->skipNext!=SQLITE_OK );
+ return pCur->skipNext;
+ }
+
+ assert( cursorOwnsBtShared(pCur) );
+ assert( (pCur->curFlags & BTCF_WriteFlag)!=0
+ && pBt->inTransaction==TRANS_WRITE
+ && (pBt->btsFlags & BTS_READ_ONLY)==0 );
+ assert( hasSharedCacheTableLock(p, pCur->pgnoRoot, pCur->pKeyInfo!=0, 2) );
+
+ /* Assert that the caller has been consistent. If this cursor was opened
+ ** expecting an index b-tree, then the caller should be inserting blob
+ ** keys with no associated data. If the cursor was opened expecting an
+ ** intkey table, the caller should be inserting integer keys with a
+ ** blob of associated data. */
+ assert( (pX->pKey==0)==(pCur->pKeyInfo==0) );
+
+ /* Save the positions of any other cursors open on this table.
+ **
+ ** In some cases, the call to btreeMoveto() below is a no-op. For
+ ** example, when inserting data into a table with auto-generated integer
+ ** keys, the VDBE layer invokes sqlite3BtreeLast() to figure out the
+ ** integer key to use. It then calls this function to actually insert the
+ ** data into the intkey B-Tree. In this case btreeMoveto() recognizes
+ ** that the cursor is already where it needs to be and returns without
+ ** doing any work. To avoid thwarting these optimizations, it is important
+ ** not to clear the cursor here.
+ */
+ if( pCur->curFlags & BTCF_Multiple ){
+ rc = saveAllCursors(pBt, pCur->pgnoRoot, pCur);
+ if( rc ) return rc;
+ }
+
+ if( pCur->pKeyInfo==0 ){
+ assert( pX->pKey==0 );
+ /* If this is an insert into a table b-tree, invalidate any incrblob
+ ** cursors open on the row being replaced */
+ invalidateIncrblobCursors(p, pX->nKey, 0);
+
+ /* If BTREE_SAVEPOSITION is set, the cursor must already be pointing
+ ** to a row with the same key as the new entry being inserted. */
+ assert( (flags & BTREE_SAVEPOSITION)==0 ||
+ ((pCur->curFlags&BTCF_ValidNKey)!=0 && pX->nKey==pCur->info.nKey) );
+
+ /* If the cursor is currently on the last row and we are appending a
+ ** new row onto the end, set the "loc" to avoid an unnecessary
+ ** btreeMoveto() call */
+ if( (pCur->curFlags&BTCF_ValidNKey)!=0 && pX->nKey==pCur->info.nKey ){
+ loc = 0;
+ }else if( (pCur->curFlags&BTCF_ValidNKey)!=0 && pX->nKey>0
+ && pCur->info.nKey==pX->nKey-1 ){
+ loc = -1;
+ }else if( loc==0 ){
+ rc = sqlite3BtreeMovetoUnpacked(pCur, 0, pX->nKey, flags!=0, &loc);
+ if( rc ) return rc;
+ }
+ }else if( loc==0 && (flags & BTREE_SAVEPOSITION)==0 ){
+ if( pX->nMem ){
+ UnpackedRecord r;
+ r.pKeyInfo = pCur->pKeyInfo;
+ r.aMem = pX->aMem;
+ r.nField = pX->nMem;
+ r.default_rc = 0;
+ r.errCode = 0;
+ r.r1 = 0;
+ r.r2 = 0;
+ r.eqSeen = 0;
+ rc = sqlite3BtreeMovetoUnpacked(pCur, &r, 0, flags!=0, &loc);
+ }else{
+ rc = btreeMoveto(pCur, pX->pKey, pX->nKey, flags!=0, &loc);
+ }
+ if( rc ) return rc;
+ }
+ assert( pCur->eState==CURSOR_VALID || (pCur->eState==CURSOR_INVALID && loc) );
+
+ pPage = pCur->apPage[pCur->iPage];
+ assert( pPage->intKey || pX->nKey>=0 );
+ assert( pPage->leaf || !pPage->intKey );
+
+ TRACE(("INSERT: table=%d nkey=%lld ndata=%d page=%d %s\n",
+ pCur->pgnoRoot, pX->nKey, pX->nData, pPage->pgno,
+ loc==0 ? "overwrite" : "new entry"));
+ assert( pPage->isInit );
+ newCell = pBt->pTmpSpace;
+ assert( newCell!=0 );
+ rc = fillInCell(pPage, newCell, pX, &szNew);
+ if( rc ) goto end_insert;
+ assert( szNew==pPage->xCellSize(pPage, newCell) );
+ assert( szNew <= MX_CELL_SIZE(pBt) );
+ idx = pCur->aiIdx[pCur->iPage];
+ if( loc==0 ){
+ CellInfo info;
+ assert( idx<pPage->nCell );
+ rc = sqlite3PagerWrite(pPage->pDbPage);
+ if( rc ){
+ goto end_insert;
+ }
+ oldCell = findCell(pPage, idx);
+ if( !pPage->leaf ){
+ memcpy(newCell, oldCell, 4);
+ }
+ rc = clearCell(pPage, oldCell, &info);
+ if( info.nSize==szNew && info.nLocal==info.nPayload ){
+ /* Overwrite the old cell with the new if they are the same size.
+ ** We could also try to do this if the old cell is smaller, then add
+ ** the leftover space to the free list. But experiments show that
+      ** doing that is no faster than skipping this optimization and just
+ ** calling dropCell() and insertCell(). */
+ assert( rc==SQLITE_OK ); /* clearCell never fails when nLocal==nPayload */
+ if( oldCell+szNew > pPage->aDataEnd ) return SQLITE_CORRUPT_BKPT;
+ memcpy(oldCell, newCell, szNew);
+ return SQLITE_OK;
+ }
+ dropCell(pPage, idx, info.nSize, &rc);
+ if( rc ) goto end_insert;
+ }else if( loc<0 && pPage->nCell>0 ){
+ assert( pPage->leaf );
+ idx = ++pCur->aiIdx[pCur->iPage];
+ }else{
+ assert( pPage->leaf );
+ }
+ insertCell(pPage, idx, newCell, szNew, 0, 0, &rc);
+ assert( pPage->nOverflow==0 || rc==SQLITE_OK );
+ assert( rc!=SQLITE_OK || pPage->nCell>0 || pPage->nOverflow>0 );
+
+ /* If no error has occurred and pPage has an overflow cell, call balance()
+ ** to redistribute the cells within the tree. Since balance() may move
+ ** the cursor, zero the BtCursor.info.nSize and BTCF_ValidNKey
+ ** variables.
+ **
+ ** Previous versions of SQLite called moveToRoot() to move the cursor
+ ** back to the root page as balance() used to invalidate the contents
+ ** of BtCursor.apPage[] and BtCursor.aiIdx[]. Instead of doing that,
+ ** set the cursor state to "invalid". This makes common insert operations
+ ** slightly faster.
+ **
+ ** There is a subtle but important optimization here too. When inserting
+ ** multiple records into an intkey b-tree using a single cursor (as can
+ ** happen while processing an "INSERT INTO ... SELECT" statement), it
+ ** is advantageous to leave the cursor pointing to the last entry in
+ ** the b-tree if possible. If the cursor is left pointing to the last
+ ** entry in the table, and the next row inserted has an integer key
+ ** larger than the largest existing key, it is possible to insert the
+ ** row without seeking the cursor. This can be a big performance boost.
+ */
+ pCur->info.nSize = 0;
+ if( pPage->nOverflow ){
+ assert( rc==SQLITE_OK );
+ pCur->curFlags &= ~(BTCF_ValidNKey);
+ rc = balance(pCur);
+
+ /* Must make sure nOverflow is reset to zero even if the balance()
+ ** fails. Internal data structure corruption will result otherwise.
+ ** Also, set the cursor state to invalid. This stops saveCursorPosition()
+ ** from trying to save the current position of the cursor. */
+ pCur->apPage[pCur->iPage]->nOverflow = 0;
+ pCur->eState = CURSOR_INVALID;
+ if( (flags & BTREE_SAVEPOSITION) && rc==SQLITE_OK ){
+ rc = moveToRoot(pCur);
+ if( pCur->pKeyInfo ){
+ assert( pCur->pKey==0 );
+ pCur->pKey = sqlite3Malloc( pX->nKey );
+ if( pCur->pKey==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ memcpy(pCur->pKey, pX->pKey, pX->nKey);
+ }
+ }
+ pCur->eState = CURSOR_REQUIRESEEK;
+ pCur->nKey = pX->nKey;
+ }
+ }
+ assert( pCur->apPage[pCur->iPage]->nOverflow==0 );
+
+end_insert:
+ return rc;
+}
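+
+/* Illustrative sketch, not from the SQLite sources: how a caller inside
+** the library might fill in a BtreePayload for the two kinds of b-tree
+** described in the header comment above.  The function name and its
+** parameters are invented, and both cursors are assumed to be write
+** cursors that were opened elsewhere.
+*/
+#if 0
+#include <string.h>
+
+static int exampleInserts(
+  BtCursor *pTabCur,       /* Write cursor on a rowid-table b-tree */
+  BtCursor *pIdxCur,       /* Write cursor on an index b-tree */
+  i64 iRowid,              /* Rowid for the table entry */
+  const void *aRecord,     /* Record image for the table entry */
+  int nRecord,             /* Size of aRecord in bytes */
+  const void *aKey,        /* Serialized key for the index entry */
+  i64 nKey                 /* Size of aKey in bytes */
+){
+  BtreePayload x;
+  int rc;
+
+  /* Rowid table: pKey must be NULL, nKey is the rowid, and the row
+  ** content goes in pData/nData (nZero left at zero). */
+  memset(&x, 0, sizeof(x));
+  x.nKey = iRowid;
+  x.pData = aRecord;
+  x.nData = nRecord;
+  rc = sqlite3BtreeInsert(pTabCur, &x, BTREE_APPEND, 0);
+  if( rc!=SQLITE_OK ) return rc;
+
+  /* Index or WITHOUT ROWID table: the key is the byte sequence in
+  ** pKey/nKey and the pData/nData/nZero fields stay zero. */
+  memset(&x, 0, sizeof(x));
+  x.pKey = aKey;
+  x.nKey = nKey;
+  return sqlite3BtreeInsert(pIdxCur, &x, 0, 0);
+}
+#endif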
+
+/*
+** Delete the entry that the cursor is pointing to.
+**
+** If the BTREE_SAVEPOSITION bit of the flags parameter is zero, then
+** the cursor is left pointing at an arbitrary location after the delete.
+** But if that bit is set, then the cursor is left in a state such that
+** the next call to BtreeNext() or BtreePrev() moves it to the same row
+** as it would have been on if the call to BtreeDelete() had been omitted.
+**
+** The BTREE_AUXDELETE bit of flags indicates that this is one of several deletes
+** associated with a single table entry and its indexes. Only one of those
+** deletes is considered the "primary" delete. The primary delete occurs
+** on a cursor that is not a BTREE_FORDELETE cursor. All but one delete
+** operation on non-FORDELETE cursors is tagged with the AUXDELETE flag.
+** The BTREE_AUXDELETE bit is a hint that is not used by this implementation,
+** but which might be used by alternative storage engines.
+*/
+SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor *pCur, u8 flags){
+ Btree *p = pCur->pBtree;
+ BtShared *pBt = p->pBt;
+ int rc; /* Return code */
+ MemPage *pPage; /* Page to delete cell from */
+ unsigned char *pCell; /* Pointer to cell to delete */
+ int iCellIdx; /* Index of cell to delete */
+ int iCellDepth; /* Depth of node containing pCell */
+ CellInfo info; /* Size of the cell being deleted */
+ int bSkipnext = 0; /* Leaf cursor in SKIPNEXT state */
+ u8 bPreserve = flags & BTREE_SAVEPOSITION; /* Keep cursor valid */
+
+ assert( cursorOwnsBtShared(pCur) );
+ assert( pBt->inTransaction==TRANS_WRITE );
+ assert( (pBt->btsFlags & BTS_READ_ONLY)==0 );
+ assert( pCur->curFlags & BTCF_WriteFlag );
+ assert( hasSharedCacheTableLock(p, pCur->pgnoRoot, pCur->pKeyInfo!=0, 2) );
+ assert( !hasReadConflicts(p, pCur->pgnoRoot) );
+ assert( pCur->aiIdx[pCur->iPage]<pCur->apPage[pCur->iPage]->nCell );
+ assert( pCur->eState==CURSOR_VALID );
+ assert( (flags & ~(BTREE_SAVEPOSITION | BTREE_AUXDELETE))==0 );
+
+ iCellDepth = pCur->iPage;
+ iCellIdx = pCur->aiIdx[iCellDepth];
+ pPage = pCur->apPage[iCellDepth];
+ pCell = findCell(pPage, iCellIdx);
+
+ /* If the bPreserve flag is set to true, then the cursor position must
+ ** be preserved following this delete operation. If the current delete
+ ** will cause a b-tree rebalance, then this is done by saving the cursor
+ ** key and leaving the cursor in CURSOR_REQUIRESEEK state before
+ ** returning.
+ **
+ ** Or, if the current delete will not cause a rebalance, then the cursor
+ ** will be left in CURSOR_SKIPNEXT state pointing to the entry immediately
+ ** before or after the deleted entry. In this case set bSkipnext to true. */
+ if( bPreserve ){
+ if( !pPage->leaf
+ || (pPage->nFree+cellSizePtr(pPage,pCell)+2)>(int)(pBt->usableSize*2/3)
+ ){
+ /* A b-tree rebalance will be required after deleting this entry.
+ ** Save the cursor key. */
+ rc = saveCursorKey(pCur);
+ if( rc ) return rc;
+ }else{
+ bSkipnext = 1;
+ }
+ }
+
+ /* If the page containing the entry to delete is not a leaf page, move
+ ** the cursor to the largest entry in the tree that is smaller than
+ ** the entry being deleted. This cell will replace the cell being deleted
+ ** from the internal node. The 'previous' entry is used for this instead
+ ** of the 'next' entry, as the previous entry is always a part of the
+ ** sub-tree headed by the child page of the cell being deleted. This makes
+ ** balancing the tree following the delete operation easier. */
+ if( !pPage->leaf ){
+ int notUsed = 0;
+ rc = sqlite3BtreePrevious(pCur, &notUsed);
+ if( rc ) return rc;
+ }
+
+ /* Save the positions of any other cursors open on this table before
+ ** making any modifications. */
+ if( pCur->curFlags & BTCF_Multiple ){
+ rc = saveAllCursors(pBt, pCur->pgnoRoot, pCur);
+ if( rc ) return rc;
+ }
+
+ /* If this is a delete operation to remove a row from a table b-tree,
+ ** invalidate any incrblob cursors open on the row being deleted. */
+ if( pCur->pKeyInfo==0 ){
+ invalidateIncrblobCursors(p, pCur->info.nKey, 0);
+ }
+
+ /* Make the page containing the entry to be deleted writable. Then free any
+ ** overflow pages associated with the entry and finally remove the cell
+ ** itself from within the page. */
+ rc = sqlite3PagerWrite(pPage->pDbPage);
+ if( rc ) return rc;
+ rc = clearCell(pPage, pCell, &info);
+ dropCell(pPage, iCellIdx, info.nSize, &rc);
+ if( rc ) return rc;
+
+ /* If the cell deleted was not located on a leaf page, then the cursor
+ ** is currently pointing to the largest entry in the sub-tree headed
+ ** by the child-page of the cell that was just deleted from an internal
+ ** node. The cell from the leaf node needs to be moved to the internal
+ ** node to replace the deleted cell. */
+ if( !pPage->leaf ){
+ MemPage *pLeaf = pCur->apPage[pCur->iPage];
+ int nCell;
+ Pgno n = pCur->apPage[iCellDepth+1]->pgno;
+ unsigned char *pTmp;
+
+ pCell = findCell(pLeaf, pLeaf->nCell-1);
+ if( pCell<&pLeaf->aData[4] ) return SQLITE_CORRUPT_BKPT;
+ nCell = pLeaf->xCellSize(pLeaf, pCell);
+ assert( MX_CELL_SIZE(pBt) >= nCell );
+ pTmp = pBt->pTmpSpace;
+ assert( pTmp!=0 );
+ rc = sqlite3PagerWrite(pLeaf->pDbPage);
+ if( rc==SQLITE_OK ){
+ insertCell(pPage, iCellIdx, pCell-4, nCell+4, pTmp, n, &rc);
+ }
+ dropCell(pLeaf, pLeaf->nCell-1, nCell, &rc);
+ if( rc ) return rc;
+ }
+
+ /* Balance the tree. If the entry deleted was located on a leaf page,
+ ** then the cursor still points to that page. In this case the first
+ ** call to balance() repairs the tree, and the if(...) condition is
+ ** never true.
+ **
+ ** Otherwise, if the entry deleted was on an internal node page, then
+ ** pCur is pointing to the leaf page from which a cell was removed to
+ ** replace the cell deleted from the internal node. This is slightly
+ ** tricky as the leaf node may be underfull, and the internal node may
+ ** be either under or overfull. In this case run the balancing algorithm
+ ** on the leaf node first. If the balance proceeds far enough up the
+ ** tree that we can be sure that any problem in the internal node has
+ ** been corrected, so be it. Otherwise, after balancing the leaf node,
+ ** walk the cursor up the tree to the internal node and balance it as
+ ** well. */
+ rc = balance(pCur);
+ if( rc==SQLITE_OK && pCur->iPage>iCellDepth ){
+ while( pCur->iPage>iCellDepth ){
+ releasePage(pCur->apPage[pCur->iPage--]);
+ }
+ rc = balance(pCur);
+ }
+
+ if( rc==SQLITE_OK ){
+ if( bSkipnext ){
+ assert( bPreserve && (pCur->iPage==iCellDepth || CORRUPT_DB) );
+ assert( pPage==pCur->apPage[pCur->iPage] || CORRUPT_DB );
+ assert( (pPage->nCell>0 || CORRUPT_DB) && iCellIdx<=pPage->nCell );
+ pCur->eState = CURSOR_SKIPNEXT;
+ if( iCellIdx>=pPage->nCell ){
+ pCur->skipNext = -1;
+ pCur->aiIdx[iCellDepth] = pPage->nCell-1;
+ }else{
+ pCur->skipNext = 1;
+ }
+ }else{
+ rc = moveToRoot(pCur);
+ if( bPreserve ){
+ pCur->eState = CURSOR_REQUIRESEEK;
+ }
+ }
+ }
+ return rc;
+}
+
+/*
+** Create a new BTree table. Write into *piTable the page
+** number for the root page of the new table.
+**
+** The type of table is determined by the flags parameter. Only the
+** following values of flags are currently in use. Other values for
+** flags might not work:
+**
+** BTREE_INTKEY|BTREE_LEAFDATA Used for SQL tables with rowid keys
+** BTREE_ZERODATA Used for SQL indices
+*/
+static int btreeCreateTable(Btree *p, int *piTable, int createTabFlags){
+ BtShared *pBt = p->pBt;
+ MemPage *pRoot;
+ Pgno pgnoRoot;
+ int rc;
+  int ptfFlags;          /* Page-type flags for the root page of the new table */
+
+ assert( sqlite3BtreeHoldsMutex(p) );
+ assert( pBt->inTransaction==TRANS_WRITE );
+ assert( (pBt->btsFlags & BTS_READ_ONLY)==0 );
+
+#ifdef SQLITE_OMIT_AUTOVACUUM
+ rc = allocateBtreePage(pBt, &pRoot, &pgnoRoot, 1, 0);
+ if( rc ){
+ return rc;
+ }
+#else
+ if( pBt->autoVacuum ){
+ Pgno pgnoMove; /* Move a page here to make room for the root-page */
+ MemPage *pPageMove; /* The page to move to. */
+
+ /* Creating a new table will probably require moving an existing database
+ ** page to make room for the new table's root page. In case this page turns
+ ** out to be an overflow page, delete all overflow page-map caches
+ ** held by open cursors.
+ */
+ invalidateAllOverflowCache(pBt);
+
+ /* Read the value of meta[3] from the database to determine where the
+ ** root page of the new table should go. meta[3] is the largest root-page
+ ** created so far, so the new root-page is (meta[3]+1).
+ */
+ sqlite3BtreeGetMeta(p, BTREE_LARGEST_ROOT_PAGE, &pgnoRoot);
+ pgnoRoot++;
+
+ /* The new root-page may not be allocated on a pointer-map page, or the
+ ** PENDING_BYTE page.
+ */
+ while( pgnoRoot==PTRMAP_PAGENO(pBt, pgnoRoot) ||
+ pgnoRoot==PENDING_BYTE_PAGE(pBt) ){
+ pgnoRoot++;
+ }
+ assert( pgnoRoot>=3 || CORRUPT_DB );
+ testcase( pgnoRoot<3 );
+
+ /* Allocate a page. The page that currently resides at pgnoRoot will
+ ** be moved to the allocated page (unless the allocated page happens
+ ** to reside at pgnoRoot).
+ */
+ rc = allocateBtreePage(pBt, &pPageMove, &pgnoMove, pgnoRoot, BTALLOC_EXACT);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+
+ if( pgnoMove!=pgnoRoot ){
+ /* pgnoRoot is the page that will be used for the root-page of
+ ** the new table (assuming an error did not occur). But we were
+ ** allocated pgnoMove. If required (i.e. if it was not allocated
+ ** by extending the file), the current page at position pgnoMove
+ ** is already journaled.
+ */
+ u8 eType = 0;
+ Pgno iPtrPage = 0;
+
+ /* Save the positions of any open cursors. This is required in
+ ** case they are holding a reference to an xFetch reference
+ ** corresponding to page pgnoRoot. */
+ rc = saveAllCursors(pBt, 0, 0);
+ releasePage(pPageMove);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+
+ /* Move the page currently at pgnoRoot to pgnoMove. */
+ rc = btreeGetPage(pBt, pgnoRoot, &pRoot, 0);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ rc = ptrmapGet(pBt, pgnoRoot, &eType, &iPtrPage);
+ if( eType==PTRMAP_ROOTPAGE || eType==PTRMAP_FREEPAGE ){
+ rc = SQLITE_CORRUPT_BKPT;
+ }
+ if( rc!=SQLITE_OK ){
+ releasePage(pRoot);
+ return rc;
+ }
+ assert( eType!=PTRMAP_ROOTPAGE );
+ assert( eType!=PTRMAP_FREEPAGE );
+ rc = relocatePage(pBt, pRoot, eType, iPtrPage, pgnoMove, 0);
+ releasePage(pRoot);
+
+ /* Obtain the page at pgnoRoot */
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ rc = btreeGetPage(pBt, pgnoRoot, &pRoot, 0);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ rc = sqlite3PagerWrite(pRoot->pDbPage);
+ if( rc!=SQLITE_OK ){
+ releasePage(pRoot);
+ return rc;
+ }
+ }else{
+ pRoot = pPageMove;
+ }
+
+ /* Update the pointer-map and meta-data with the new root-page number. */
+ ptrmapPut(pBt, pgnoRoot, PTRMAP_ROOTPAGE, 0, &rc);
+ if( rc ){
+ releasePage(pRoot);
+ return rc;
+ }
+
+ /* When the new root page was allocated, page 1 was made writable in
+ ** order either to increase the database filesize, or to decrement the
+ ** freelist count. Hence, the sqlite3BtreeUpdateMeta() call cannot fail.
+ */
+ assert( sqlite3PagerIswriteable(pBt->pPage1->pDbPage) );
+ rc = sqlite3BtreeUpdateMeta(p, 4, pgnoRoot);
+ if( NEVER(rc) ){
+ releasePage(pRoot);
+ return rc;
+ }
+
+ }else{
+ rc = allocateBtreePage(pBt, &pRoot, &pgnoRoot, 1, 0);
+ if( rc ) return rc;
+ }
+#endif
+ assert( sqlite3PagerIswriteable(pRoot->pDbPage) );
+ if( createTabFlags & BTREE_INTKEY ){
+ ptfFlags = PTF_INTKEY | PTF_LEAFDATA | PTF_LEAF;
+ }else{
+ ptfFlags = PTF_ZERODATA | PTF_LEAF;
+ }
+ zeroPage(pRoot, ptfFlags);
+ sqlite3PagerUnref(pRoot->pDbPage);
+ assert( (pBt->openFlags & BTREE_SINGLE)==0 || pgnoRoot==2 );
+ *piTable = (int)pgnoRoot;
+ return SQLITE_OK;
+}
+SQLITE_PRIVATE int sqlite3BtreeCreateTable(Btree *p, int *piTable, int flags){
+ int rc;
+ sqlite3BtreeEnter(p);
+ rc = btreeCreateTable(p, piTable, flags);
+ sqlite3BtreeLeave(p);
+ return rc;
+}
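+
+/* A minimal usage sketch (error handling omitted; the caller is assumed
+** to hold an open write transaction on the b-tree):
+**
+**   int iRoot;                    stores the root page of the new b-tree
+**   rc = sqlite3BtreeCreateTable(p, &iRoot, BTREE_INTKEY);
+**
+** Passing BTREE_INTKEY creates a rowid-table b-tree; a flags value without
+** that bit set (for example BTREE_BLOBKEY) creates an index-style b-tree,
+** since the code above only tests the BTREE_INTKEY bit.
+*/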
+
+/*
+** Erase the given database page and all its children. Return
+** the page to the freelist.
+*/
+static int clearDatabasePage(
+ BtShared *pBt, /* The BTree that contains the table */
+ Pgno pgno, /* Page number to clear */
+ int freePageFlag, /* Deallocate page if true */
+ int *pnChange /* Add number of Cells freed to this counter */
+){
+ MemPage *pPage;
+ int rc;
+ unsigned char *pCell;
+ int i;
+ int hdr;
+ CellInfo info;
+
+ assert( sqlite3_mutex_held(pBt->mutex) );
+ if( pgno>btreePagecount(pBt) ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+ rc = getAndInitPage(pBt, pgno, &pPage, 0, 0);
+ if( rc ) return rc;
+ if( pPage->bBusy ){
+ rc = SQLITE_CORRUPT_BKPT;
+ goto cleardatabasepage_out;
+ }
+ pPage->bBusy = 1;
+ hdr = pPage->hdrOffset;
+ for(i=0; i<pPage->nCell; i++){
+ pCell = findCell(pPage, i);
+ if( !pPage->leaf ){
+ rc = clearDatabasePage(pBt, get4byte(pCell), 1, pnChange);
+ if( rc ) goto cleardatabasepage_out;
+ }
+ rc = clearCell(pPage, pCell, &info);
+ if( rc ) goto cleardatabasepage_out;
+ }
+ if( !pPage->leaf ){
+ rc = clearDatabasePage(pBt, get4byte(&pPage->aData[hdr+8]), 1, pnChange);
+ if( rc ) goto cleardatabasepage_out;
+ }else if( pnChange ){
+ assert( pPage->intKey || CORRUPT_DB );
+ testcase( !pPage->intKey );
+ *pnChange += pPage->nCell;
+ }
+ if( freePageFlag ){
+ freePage(pPage, &rc);
+ }else if( (rc = sqlite3PagerWrite(pPage->pDbPage))==0 ){
+ zeroPage(pPage, pPage->aData[hdr] | PTF_LEAF);
+ }
+
+cleardatabasepage_out:
+ pPage->bBusy = 0;
+ releasePage(pPage);
+ return rc;
+}
+
+/*
+** Delete all information from a single table in the database. iTable is
+** the page number of the root of the table. After this routine returns,
+** the root page is empty, but still exists.
+**
+** This routine will fail with SQLITE_LOCKED if there are any open
+** read cursors on the table. Open write cursors are moved to the
+** root of the table.
+**
+** If pnChange is not NULL, then table iTable must be an intkey table. The
+** integer value pointed to by pnChange is incremented by the number of
+** entries in the table.
+*/
+SQLITE_PRIVATE int sqlite3BtreeClearTable(Btree *p, int iTable, int *pnChange){
+ int rc;
+ BtShared *pBt = p->pBt;
+ sqlite3BtreeEnter(p);
+ assert( p->inTrans==TRANS_WRITE );
+
+ rc = saveAllCursors(pBt, (Pgno)iTable, 0);
+
+ if( SQLITE_OK==rc ){
+ /* Invalidate all incrblob cursors open on table iTable (assuming iTable
+ ** is the root of a table b-tree - if it is not, the following call is
+ ** a no-op). */
+ invalidateIncrblobCursors(p, 0, 1);
+ rc = clearDatabasePage(pBt, (Pgno)iTable, 0, pnChange);
+ }
+ sqlite3BtreeLeave(p);
+ return rc;
+}
+
+/*
+** Delete all information from the single table that pCur is open on.
+**
+** This routine only works for pCur on an ephemeral table.
+*/
+SQLITE_PRIVATE int sqlite3BtreeClearTableOfCursor(BtCursor *pCur){
+ return sqlite3BtreeClearTable(pCur->pBtree, pCur->pgnoRoot, 0);
+}
+
+/*
+** Erase all information in a table and add the root of the table to
+** the freelist. Except, the root of the principal table (the one on
+** page 1) is never added to the freelist.
+**
+** This routine will fail with SQLITE_LOCKED if there are any open
+** cursors on the table.
+**
+** If AUTOVACUUM is enabled and the page at iTable is not the last
+** root page in the database file, then the last root page
+** in the database file is moved into the slot formerly occupied by
+** iTable and that last slot formerly occupied by the last root page
+** is added to the freelist instead of iTable. In this way, all
+** root pages are kept at the beginning of the database file, which
+** is necessary for AUTOVACUUM to work right. *piMoved is set to the
+** page number that used to be the last root page in the file before
+** the move. If no page gets moved, *piMoved is set to 0.
+** The last root page is recorded in meta[3] and the value of
+** meta[3] is updated by this procedure.
+*/
+static int btreeDropTable(Btree *p, Pgno iTable, int *piMoved){
+ int rc;
+ MemPage *pPage = 0;
+ BtShared *pBt = p->pBt;
+
+ assert( sqlite3BtreeHoldsMutex(p) );
+ assert( p->inTrans==TRANS_WRITE );
+ assert( iTable>=2 );
+
+ rc = btreeGetPage(pBt, (Pgno)iTable, &pPage, 0);
+ if( rc ) return rc;
+ rc = sqlite3BtreeClearTable(p, iTable, 0);
+ if( rc ){
+ releasePage(pPage);
+ return rc;
+ }
+
+ *piMoved = 0;
+
+#ifdef SQLITE_OMIT_AUTOVACUUM
+ freePage(pPage, &rc);
+ releasePage(pPage);
+#else
+ if( pBt->autoVacuum ){
+ Pgno maxRootPgno;
+ sqlite3BtreeGetMeta(p, BTREE_LARGEST_ROOT_PAGE, &maxRootPgno);
+
+ if( iTable==maxRootPgno ){
+ /* If the table being dropped is the table with the largest root-page
+ ** number in the database, put the root page on the free list.
+ */
+ freePage(pPage, &rc);
+ releasePage(pPage);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ }else{
+ /* The table being dropped does not have the largest root-page
+ ** number in the database. So move the page that does into the
+ ** gap left by the deleted root-page.
+ */
+ MemPage *pMove;
+ releasePage(pPage);
+ rc = btreeGetPage(pBt, maxRootPgno, &pMove, 0);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ rc = relocatePage(pBt, pMove, PTRMAP_ROOTPAGE, 0, iTable, 0);
+ releasePage(pMove);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ pMove = 0;
+ rc = btreeGetPage(pBt, maxRootPgno, &pMove, 0);
+ freePage(pMove, &rc);
+ releasePage(pMove);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ *piMoved = maxRootPgno;
+ }
+
+ /* Set the new 'max-root-page' value in the database header. This
+ ** is the old value less one, less one more if that happens to
+ ** be a pointer-map page, less one again if that is the
+ ** PENDING_BYTE_PAGE.
+ */
+ maxRootPgno--;
+ while( maxRootPgno==PENDING_BYTE_PAGE(pBt)
+ || PTRMAP_ISPAGE(pBt, maxRootPgno) ){
+ maxRootPgno--;
+ }
+ assert( maxRootPgno!=PENDING_BYTE_PAGE(pBt) );
+
+ rc = sqlite3BtreeUpdateMeta(p, 4, maxRootPgno);
+ }else{
+ freePage(pPage, &rc);
+ releasePage(pPage);
+ }
+#endif
+ return rc;
+}
+SQLITE_PRIVATE int sqlite3BtreeDropTable(Btree *p, int iTable, int *piMoved){
+ int rc;
+ sqlite3BtreeEnter(p);
+ rc = btreeDropTable(p, iTable, piMoved);
+ sqlite3BtreeLeave(p);
+ return rc;
+}
+
+
+/*
+** This function may only be called if the b-tree connection already
+** has a read or write transaction open on the database.
+**
+** Read the meta-information out of a database file. Meta[0]
+** is the number of free pages currently in the database. Meta[1]
+** through meta[15] are available for use by higher layers. Meta[0]
+** is read-only, the others are read/write.
+**
+** The schema layer numbers meta values differently. At the schema
+** layer (and the SetCookie and ReadCookie opcodes) the number of
+** free pages is not visible. So Cookie[0] is the same as Meta[1].
+**
+** This routine treats Meta[BTREE_DATA_VERSION] as a special case. Instead
+** of reading the value out of the header, it instead loads the "DataVersion"
+** from the pager. The BTREE_DATA_VERSION value is not actually stored in the
+** database file. It is a number computed by the pager. But its access
+** pattern is the same as header meta values, and so it is convenient to
+** read it from this routine.
+*/
+SQLITE_PRIVATE void sqlite3BtreeGetMeta(Btree *p, int idx, u32 *pMeta){
+ BtShared *pBt = p->pBt;
+
+ sqlite3BtreeEnter(p);
+ assert( p->inTrans>TRANS_NONE );
+ assert( SQLITE_OK==querySharedCacheTableLock(p, MASTER_ROOT, READ_LOCK) );
+ assert( pBt->pPage1 );
+ assert( idx>=0 && idx<=15 );
+
+ if( idx==BTREE_DATA_VERSION ){
+ *pMeta = sqlite3PagerDataVersion(pBt->pPager) + p->iDataVersion;
+ }else{
+ *pMeta = get4byte(&pBt->pPage1->aData[36 + idx*4]);
+ }
+
+ /* If auto-vacuum is disabled in this build and this is an auto-vacuum
+ ** database, mark the database as read-only. */
+#ifdef SQLITE_OMIT_AUTOVACUUM
+ if( idx==BTREE_LARGEST_ROOT_PAGE && *pMeta>0 ){
+ pBt->btsFlags |= BTS_READ_ONLY;
+ }
+#endif
+
+ sqlite3BtreeLeave(p);
+}
+
+/*
+** Write meta-information back into the database. Meta[0] is
+** read-only and may not be written.
+*/
+SQLITE_PRIVATE int sqlite3BtreeUpdateMeta(Btree *p, int idx, u32 iMeta){
+ BtShared *pBt = p->pBt;
+ unsigned char *pP1;
+ int rc;
+ assert( idx>=1 && idx<=15 );
+ sqlite3BtreeEnter(p);
+ assert( p->inTrans==TRANS_WRITE );
+ assert( pBt->pPage1!=0 );
+ pP1 = pBt->pPage1->aData;
+ rc = sqlite3PagerWrite(pBt->pPage1->pDbPage);
+ if( rc==SQLITE_OK ){
+ put4byte(&pP1[36 + idx*4], iMeta);
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ if( idx==BTREE_INCR_VACUUM ){
+ assert( pBt->autoVacuum || iMeta==0 );
+ assert( iMeta==0 || iMeta==1 );
+ pBt->incrVacuum = (u8)iMeta;
+ }
+#endif
+ }
+ sqlite3BtreeLeave(p);
+ return rc;
+}
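+
+/* For example, to read and then bump the schema cookie (meta slot
+** BTREE_SCHEMA_VERSION) while a write transaction is open, a caller
+** might do the following (a minimal sketch; error handling omitted):
+**
+**   u32 iCookie;
+**   sqlite3BtreeGetMeta(p, BTREE_SCHEMA_VERSION, &iCookie);
+**   rc = sqlite3BtreeUpdateMeta(p, BTREE_SCHEMA_VERSION, iCookie+1);
+**
+** As the two routines above show, meta slot idx is kept as a 4-byte
+** big-endian integer at byte offset 36+idx*4 of page 1.
+*/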
+
+#ifndef SQLITE_OMIT_BTREECOUNT
+/*
+** The first argument, pCur, is a cursor opened on some b-tree. Count the
+** number of entries in the b-tree and write the result to *pnEntry.
+**
+** SQLITE_OK is returned if the operation is successfully executed.
+** Otherwise, if an error is encountered (i.e. an IO error or database
+** corruption) an SQLite error code is returned.
+*/
+SQLITE_PRIVATE int sqlite3BtreeCount(BtCursor *pCur, i64 *pnEntry){
+ i64 nEntry = 0; /* Value to return in *pnEntry */
+ int rc; /* Return code */
+
+ if( pCur->pgnoRoot==0 ){
+ *pnEntry = 0;
+ return SQLITE_OK;
+ }
+ rc = moveToRoot(pCur);
+
+ /* Unless an error occurs, the following loop runs one iteration for each
+ ** page in the B-Tree structure (not including overflow pages).
+ */
+ while( rc==SQLITE_OK ){
+ int iIdx; /* Index of child node in parent */
+ MemPage *pPage; /* Current page of the b-tree */
+
+ /* If this is a leaf page or the tree is not an int-key tree, then
+ ** this page contains countable entries. Increment the entry counter
+ ** accordingly.
+ */
+ pPage = pCur->apPage[pCur->iPage];
+ if( pPage->leaf || !pPage->intKey ){
+ nEntry += pPage->nCell;
+ }
+
+ /* pPage is a leaf node. This loop navigates the cursor up the tree
+ ** so that it points to the interior cell that is the parent of
+ ** the next page in the tree that has not yet been visited. The
+ ** pCur->aiIdx[pCur->iPage] value is set to the index of the parent cell
+ ** of the page, or to the number of cells in the page if the next page
+ ** to visit is the right-child of its parent.
+ **
+ ** If all pages in the tree have been visited, return SQLITE_OK to the
+ ** caller.
+ */
+ if( pPage->leaf ){
+ do {
+ if( pCur->iPage==0 ){
+ /* All pages of the b-tree have been visited. Return successfully. */
+ *pnEntry = nEntry;
+ return moveToRoot(pCur);
+ }
+ moveToParent(pCur);
+ }while ( pCur->aiIdx[pCur->iPage]>=pCur->apPage[pCur->iPage]->nCell );
+
+ pCur->aiIdx[pCur->iPage]++;
+ pPage = pCur->apPage[pCur->iPage];
+ }
+
+ /* Descend to the child node of the cell that the cursor currently
+ ** points at. This is the right-child if (iIdx==pPage->nCell).
+ */
+ iIdx = pCur->aiIdx[pCur->iPage];
+ if( iIdx==pPage->nCell ){
+ rc = moveToChild(pCur, get4byte(&pPage->aData[pPage->hdrOffset+8]));
+ }else{
+ rc = moveToChild(pCur, get4byte(findCell(pPage, iIdx)));
+ }
+ }
+
+ /* An error has occurred. Return an error code. */
+ return rc;
+}
+#endif
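+
+/* A minimal usage sketch of sqlite3BtreeCount() (this is essentially what
+** the OP_Count opcode does to implement "SELECT count(*)" on a table with
+** no WHERE clause; error handling omitted):
+**
+**   i64 nRow = 0;
+**   rc = sqlite3BtreeCount(pCur, &nRow);
+**
+** On SQLITE_OK, nRow holds the number of entries in the b-tree.
+*/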
+
+/*
+** Return the pager associated with a BTree. This routine is used for
+** testing and debugging only.
+*/
+SQLITE_PRIVATE Pager *sqlite3BtreePager(Btree *p){
+ return p->pBt->pPager;
+}
+
+#ifndef SQLITE_OMIT_INTEGRITY_CHECK
+/*
+** Append a message to the error message string.
+*/
+static void checkAppendMsg(
+ IntegrityCk *pCheck,
+ const char *zFormat,
+ ...
+){
+ va_list ap;
+ if( !pCheck->mxErr ) return;
+ pCheck->mxErr--;
+ pCheck->nErr++;
+ va_start(ap, zFormat);
+ if( pCheck->errMsg.nChar ){
+ sqlite3StrAccumAppend(&pCheck->errMsg, "\n", 1);
+ }
+ if( pCheck->zPfx ){
+ sqlite3XPrintf(&pCheck->errMsg, pCheck->zPfx, pCheck->v1, pCheck->v2);
+ }
+ sqlite3VXPrintf(&pCheck->errMsg, zFormat, ap);
+ va_end(ap);
+ if( pCheck->errMsg.accError==STRACCUM_NOMEM ){
+ pCheck->mallocFailed = 1;
+ }
+}
+#endif /* SQLITE_OMIT_INTEGRITY_CHECK */
+
+#ifndef SQLITE_OMIT_INTEGRITY_CHECK
+
+/*
+** Return non-zero if the bit in the IntegrityCk.aPgRef[] array that
+** corresponds to page iPg is already set.
+*/
+static int getPageReferenced(IntegrityCk *pCheck, Pgno iPg){
+ assert( iPg<=pCheck->nPage && sizeof(pCheck->aPgRef[0])==1 );
+ return (pCheck->aPgRef[iPg/8] & (1 << (iPg & 0x07)));
+}
+
+/*
+** Set the bit in the IntegrityCk.aPgRef[] array that corresponds to page iPg.
+*/
+static void setPageReferenced(IntegrityCk *pCheck, Pgno iPg){
+ assert( iPg<=pCheck->nPage && sizeof(pCheck->aPgRef[0])==1 );
+ pCheck->aPgRef[iPg/8] |= (1 << (iPg & 0x07));
+}
+
+
+/*
+** Add 1 to the reference count for page iPage. If this is the second
+** reference to the page, add an error message to pCheck->zErrMsg.
+** Return 1 if there are 2 or more references to the page and 0 if
+** this is the first reference to the page.
+**
+** Also check that the page number is in bounds.
+*/
+static int checkRef(IntegrityCk *pCheck, Pgno iPage){
+ if( iPage==0 ) return 1;
+ if( iPage>pCheck->nPage ){
+ checkAppendMsg(pCheck, "invalid page number %d", iPage);
+ return 1;
+ }
+ if( getPageReferenced(pCheck, iPage) ){
+ checkAppendMsg(pCheck, "2nd reference to page %d", iPage);
+ return 1;
+ }
+ setPageReferenced(pCheck, iPage);
+ return 0;
+}
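+
+/* For example, with the one-bit-per-page encoding used above, page 21 is
+** tracked by bit (21 & 0x07)==5 of byte aPgRef[21/8]==aPgRef[2]. The first
+** checkRef(pCheck, 21) call sets that bit and returns 0; any later call
+** returns 1 and appends a "2nd reference to page 21" error message.
+*/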
+
+#ifndef SQLITE_OMIT_AUTOVACUUM
+/*
+** Check that the entry in the pointer-map for page iChild maps to
+** page iParent, pointer type ptrType. If not, append an error message
+** to pCheck.
+*/
+static void checkPtrmap(
+ IntegrityCk *pCheck, /* Integrity check context */
+ Pgno iChild, /* Child page number */
+ u8 eType, /* Expected pointer map type */
+ Pgno iParent /* Expected pointer map parent page number */
+){
+ int rc;
+ u8 ePtrmapType;
+ Pgno iPtrmapParent;
+
+ rc = ptrmapGet(pCheck->pBt, iChild, &ePtrmapType, &iPtrmapParent);
+ if( rc!=SQLITE_OK ){
+ if( rc==SQLITE_NOMEM || rc==SQLITE_IOERR_NOMEM ) pCheck->mallocFailed = 1;
+ checkAppendMsg(pCheck, "Failed to read ptrmap key=%d", iChild);
+ return;
+ }
+
+ if( ePtrmapType!=eType || iPtrmapParent!=iParent ){
+ checkAppendMsg(pCheck,
+ "Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)",
+ iChild, eType, iParent, ePtrmapType, iPtrmapParent);
+ }
+}
+#endif
+
+/*
+** Check the integrity of the freelist or of an overflow page list.
+** Verify that the number of pages on the list is N.
+*/
+static void checkList(
+ IntegrityCk *pCheck, /* Integrity checking context */
+ int isFreeList, /* True for a freelist. False for overflow page list */
+ int iPage, /* Page number for first page in the list */
+ int N /* Expected number of pages in the list */
+){
+ int i;
+ int expected = N;
+ int iFirst = iPage;
+ while( N-- > 0 && pCheck->mxErr ){
+ DbPage *pOvflPage;
+ unsigned char *pOvflData;
+ if( iPage<1 ){
+ checkAppendMsg(pCheck,
+ "%d of %d pages missing from overflow list starting at %d",
+ N+1, expected, iFirst);
+ break;
+ }
+ if( checkRef(pCheck, iPage) ) break;
+ if( sqlite3PagerGet(pCheck->pPager, (Pgno)iPage, &pOvflPage, 0) ){
+ checkAppendMsg(pCheck, "failed to get page %d", iPage);
+ break;
+ }
+ pOvflData = (unsigned char *)sqlite3PagerGetData(pOvflPage);
+ if( isFreeList ){
+ int n = get4byte(&pOvflData[4]);
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ if( pCheck->pBt->autoVacuum ){
+ checkPtrmap(pCheck, iPage, PTRMAP_FREEPAGE, 0);
+ }
+#endif
+ if( n>(int)pCheck->pBt->usableSize/4-2 ){
+ checkAppendMsg(pCheck,
+ "freelist leaf count too big on page %d", iPage);
+ N--;
+ }else{
+ for(i=0; i<n; i++){
+ Pgno iFreePage = get4byte(&pOvflData[8+i*4]);
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ if( pCheck->pBt->autoVacuum ){
+ checkPtrmap(pCheck, iFreePage, PTRMAP_FREEPAGE, 0);
+ }
+#endif
+ checkRef(pCheck, iFreePage);
+ }
+ N -= n;
+ }
+ }
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ else{
+ /* If this database supports auto-vacuum and iPage is not the last
+ ** page in this overflow list, check that the pointer-map entry for
+ ** the following page matches iPage.
+ */
+ if( pCheck->pBt->autoVacuum && N>0 ){
+ i = get4byte(pOvflData);
+ checkPtrmap(pCheck, i, PTRMAP_OVERFLOW2, iPage);
+ }
+ }
+#endif
+ iPage = get4byte(pOvflData);
+ sqlite3PagerUnref(pOvflPage);
+
+ if( isFreeList && N<(iPage!=0) ){
+ checkAppendMsg(pCheck, "free-page count in header is too small");
+ }
+ }
+}
+#endif /* SQLITE_OMIT_INTEGRITY_CHECK */
+
+/*
+** An implementation of a min-heap.
+**
+** aHeap[0] is the number of elements on the heap. aHeap[1] is the
+** root element. The daughter nodes of aHeap[N] are aHeap[N*2]
+** and aHeap[N*2+1].
+**
+** The heap property is this: Every node is less than or equal to both
+** of its daughter nodes. A consequence of the heap property is that the
+** root node aHeap[1] is always the minimum value currently in the heap.
+**
+** The btreeHeapInsert() routine inserts an unsigned 32-bit number onto
+** the heap, preserving the heap property. The btreeHeapPull() routine
+** removes the root element from the heap (the minimum value in the heap)
+** and then moves other nodes around as necessary to preserve the heap
+** property.
+**
+** This heap is used for cell overlap and coverage testing. Each u32
+** entry represents the span of a cell or freeblock on a btree page.
+** The upper 16 bits are the index of the first byte of a range and the
+** lower 16 bits are the index of the last byte of that range.
+*/
+static void btreeHeapInsert(u32 *aHeap, u32 x){
+ u32 j, i = ++aHeap[0];
+ aHeap[i] = x;
+ while( (j = i/2)>0 && aHeap[j]>aHeap[i] ){
+ x = aHeap[j];
+ aHeap[j] = aHeap[i];
+ aHeap[i] = x;
+ i = j;
+ }
+}
+static int btreeHeapPull(u32 *aHeap, u32 *pOut){
+ u32 j, i, x;
+ if( (x = aHeap[0])==0 ) return 0;
+ *pOut = aHeap[1];
+ aHeap[1] = aHeap[x];
+ aHeap[x] = 0xffffffff;
+ aHeap[0]--;
+ i = 1;
+ while( (j = i*2)<=aHeap[0] ){
+ if( aHeap[j]>aHeap[j+1] ) j++;
+ if( aHeap[i]<aHeap[j] ) break;
+ x = aHeap[i];
+ aHeap[i] = aHeap[j];
+ aHeap[j] = x;
+ i = j;
+ }
+ return 1;
+}
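+
+/* For example, a cell occupying bytes 100..149 of a page is inserted as
+** the single value (100<<16)|149. Because the start offset occupies the
+** high 16 bits, btreeHeapPull() hands spans back in order of increasing
+** start offset, which is what the coverage check in checkTreePage()
+** below relies on:
+**
+**   u32 x;
+**   heap[0] = 0;                            heap[] assumed large enough
+**   btreeHeapInsert(heap, (150<<16)|199);
+**   btreeHeapInsert(heap, (100<<16)|149);
+**   btreeHeapPull(heap, &x);                x is now (100<<16)|149
+**   btreeHeapPull(heap, &x);                x is now (150<<16)|199
+*/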
+
+#ifndef SQLITE_OMIT_INTEGRITY_CHECK
+/*
+** Do various sanity checks on a single page of a tree. Return
+** the tree depth. Leaf pages return 0. Parents of leaf pages
+** return 1, and so forth.
+**
+** These checks are done:
+**
+** 1. Make sure that cells and freeblocks do not overlap
+** but combine to completely cover the page.
+** 2. Make sure integer cell keys are in order.
+** 3. Check the integrity of overflow pages.
+** 4. Recursively call checkTreePage on all children.
+** 5. Verify that the depth of all children is the same.
+*/
+static int checkTreePage(
+ IntegrityCk *pCheck, /* Context for the sanity check */
+ int iPage, /* Page number of the page to check */
+ i64 *piMinKey, /* Write minimum integer primary key here */
+ i64 maxKey /* Error if integer primary key greater than this */
+){
+ MemPage *pPage = 0; /* The page being analyzed */
+ int i; /* Loop counter */
+ int rc; /* Result code from subroutine call */
+ int depth = -1, d2; /* Depth of a subtree */
+ int pgno; /* Page number */
+ int nFrag; /* Number of fragmented bytes on the page */
+ int hdr; /* Offset to the page header */
+ int cellStart; /* Offset to the start of the cell pointer array */
+ int nCell; /* Number of cells */
+ int doCoverageCheck = 1; /* True if cell coverage checking should be done */
+ int keyCanBeEqual = 1; /* True if IPK can be equal to maxKey
+ ** False if IPK must be strictly less than maxKey */
+ u8 *data; /* Page content */
+ u8 *pCell; /* Cell content */
+ u8 *pCellIdx; /* Next element of the cell pointer array */
+ BtShared *pBt; /* The BtShared object that owns pPage */
+ u32 pc; /* Address of a cell */
+ u32 usableSize; /* Usable size of the page */
+ u32 contentOffset; /* Offset to the start of the cell content area */
+ u32 *heap = 0; /* Min-heap used for checking cell coverage */
+ u32 x, prev = 0; /* Next and previous entry on the min-heap */
+ const char *saved_zPfx = pCheck->zPfx;
+ int saved_v1 = pCheck->v1;
+ int saved_v2 = pCheck->v2;
+ u8 savedIsInit = 0;
+
+ /* Check that the page exists
+ */
+ pBt = pCheck->pBt;
+ usableSize = pBt->usableSize;
+ if( iPage==0 ) return 0;
+ if( checkRef(pCheck, iPage) ) return 0;
+ pCheck->zPfx = "Page %d: ";
+ pCheck->v1 = iPage;
+ if( (rc = btreeGetPage(pBt, (Pgno)iPage, &pPage, 0))!=0 ){
+ checkAppendMsg(pCheck,
+ "unable to get the page. error code=%d", rc);
+ goto end_of_check;
+ }
+
+ /* Clear MemPage.isInit to make sure the corruption detection code in
+ ** btreeInitPage() is executed. */
+ savedIsInit = pPage->isInit;
+ pPage->isInit = 0;
+ if( (rc = btreeInitPage(pPage))!=0 ){
+ assert( rc==SQLITE_CORRUPT ); /* The only possible error from InitPage */
+ checkAppendMsg(pCheck,
+ "btreeInitPage() returns error code %d", rc);
+ goto end_of_check;
+ }
+ data = pPage->aData;
+ hdr = pPage->hdrOffset;
+
+ /* Set up for cell analysis */
+ pCheck->zPfx = "On tree page %d cell %d: ";
+ contentOffset = get2byteNotZero(&data[hdr+5]);
+ assert( contentOffset<=usableSize ); /* Enforced by btreeInitPage() */
+
+ /* EVIDENCE-OF: R-37002-32774 The two-byte integer at offset 3 gives the
+ ** number of cells on the page. */
+ nCell = get2byte(&data[hdr+3]);
+ assert( pPage->nCell==nCell );
+
+ /* EVIDENCE-OF: R-23882-45353 The cell pointer array of a b-tree page
+ ** immediately follows the b-tree page header. */
+ cellStart = hdr + 12 - 4*pPage->leaf;
+ assert( pPage->aCellIdx==&data[cellStart] );
+ pCellIdx = &data[cellStart + 2*(nCell-1)];
+
+ if( !pPage->leaf ){
+ /* Analyze the right-child page of internal pages */
+ pgno = get4byte(&data[hdr+8]);
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ if( pBt->autoVacuum ){
+ pCheck->zPfx = "On page %d at right child: ";
+ checkPtrmap(pCheck, pgno, PTRMAP_BTREE, iPage);
+ }
+#endif
+ depth = checkTreePage(pCheck, pgno, &maxKey, maxKey);
+ keyCanBeEqual = 0;
+ }else{
+ /* For leaf pages, the coverage check will occur in the same loop
+ ** as the other cell checks, so initialize the heap. */
+ heap = pCheck->heap;
+ heap[0] = 0;
+ }
+
+ /* EVIDENCE-OF: R-02776-14802 The cell pointer array consists of K 2-byte
+ ** integer offsets to the cell contents. */
+ for(i=nCell-1; i>=0 && pCheck->mxErr; i--){
+ CellInfo info;
+
+ /* Check cell size */
+ pCheck->v2 = i;
+ assert( pCellIdx==&data[cellStart + i*2] );
+ pc = get2byteAligned(pCellIdx);
+ pCellIdx -= 2;
+ if( pc<contentOffset || pc>usableSize-4 ){
+ checkAppendMsg(pCheck, "Offset %d out of range %d..%d",
+ pc, contentOffset, usableSize-4);
+ doCoverageCheck = 0;
+ continue;
+ }
+ pCell = &data[pc];
+ pPage->xParseCell(pPage, pCell, &info);
+ if( pc+info.nSize>usableSize ){
+ checkAppendMsg(pCheck, "Extends off end of page");
+ doCoverageCheck = 0;
+ continue;
+ }
+
+ /* Check for integer primary key out of range */
+ if( pPage->intKey ){
+ if( keyCanBeEqual ? (info.nKey > maxKey) : (info.nKey >= maxKey) ){
+ checkAppendMsg(pCheck, "Rowid %lld out of order", info.nKey);
+ }
+ maxKey = info.nKey;
+ }
+
+ /* Check the content overflow list */
+ if( info.nPayload>info.nLocal ){
+ int nPage; /* Number of pages on the overflow chain */
+ Pgno pgnoOvfl; /* First page of the overflow chain */
+ assert( pc + info.nSize - 4 <= usableSize );
+ nPage = (info.nPayload - info.nLocal + usableSize - 5)/(usableSize - 4);
+ pgnoOvfl = get4byte(&pCell[info.nSize - 4]);
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ if( pBt->autoVacuum ){
+ checkPtrmap(pCheck, pgnoOvfl, PTRMAP_OVERFLOW1, iPage);
+ }
+#endif
+ checkList(pCheck, 0, pgnoOvfl, nPage);
+ }
+
+ if( !pPage->leaf ){
+ /* Check sanity of left child page for internal pages */
+ pgno = get4byte(pCell);
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ if( pBt->autoVacuum ){
+ checkPtrmap(pCheck, pgno, PTRMAP_BTREE, iPage);
+ }
+#endif
+ d2 = checkTreePage(pCheck, pgno, &maxKey, maxKey);
+ keyCanBeEqual = 0;
+ if( d2!=depth ){
+ checkAppendMsg(pCheck, "Child page depth differs");
+ depth = d2;
+ }
+ }else{
+ /* Populate the coverage-checking heap for leaf pages */
+ btreeHeapInsert(heap, (pc<<16)|(pc+info.nSize-1));
+ }
+ }
+ *piMinKey = maxKey;
+
+ /* Check for complete coverage of the page
+ */
+ pCheck->zPfx = 0;
+ if( doCoverageCheck && pCheck->mxErr>0 ){
+ /* For leaf pages, the min-heap has already been initialized and the
+ ** cells have already been inserted. But for internal pages, that has
+ ** not yet been done, so do it now */
+ if( !pPage->leaf ){
+ heap = pCheck->heap;
+ heap[0] = 0;
+ for(i=nCell-1; i>=0; i--){
+ u32 size;
+ pc = get2byteAligned(&data[cellStart+i*2]);
+ size = pPage->xCellSize(pPage, &data[pc]);
+ btreeHeapInsert(heap, (pc<<16)|(pc+size-1));
+ }
+ }
+ /* Add the freeblocks to the min-heap
+ **
+ ** EVIDENCE-OF: R-20690-50594 The second field of the b-tree page header
+ ** is the offset of the first freeblock, or zero if there are no
+ ** freeblocks on the page.
+ */
+ i = get2byte(&data[hdr+1]);
+ while( i>0 ){
+ int size, j;
+ assert( (u32)i<=usableSize-4 ); /* Enforced by btreeInitPage() */
+ size = get2byte(&data[i+2]);
+ assert( (u32)(i+size)<=usableSize ); /* Enforced by btreeInitPage() */
+ btreeHeapInsert(heap, (((u32)i)<<16)|(i+size-1));
+ /* EVIDENCE-OF: R-58208-19414 The first 2 bytes of a freeblock are a
+ ** big-endian integer which is the offset in the b-tree page of the next
+ ** freeblock in the chain, or zero if the freeblock is the last on the
+ ** chain. */
+ j = get2byte(&data[i]);
+ /* EVIDENCE-OF: R-06866-39125 Freeblocks are always connected in order of
+ ** increasing offset. */
+ assert( j==0 || j>i+size ); /* Enforced by btreeInitPage() */
+ assert( (u32)j<=usableSize-4 ); /* Enforced by btreeInitPage() */
+ i = j;
+ }
+ /* Analyze the min-heap looking for overlap between cells and/or
+ ** freeblocks, and counting the number of untracked bytes in nFrag.
+ **
+ ** Each min-heap entry is of the form: (start_address<<16)|end_address.
+ ** There is an implied first entry that covers the page header, the cell
+ ** pointer index, and the gap between the cell pointer index and the start
+ ** of cell content.
+ **
+ ** The loop below pulls entries from the min-heap in order and compares
+ ** the start_address against the previous end_address. If there is an
+ ** overlap, that means bytes are used multiple times. If there is a gap,
+ ** that gap is added to the fragmentation count.
+ */
+ nFrag = 0;
+ prev = contentOffset - 1; /* Implied first min-heap entry */
+ while( btreeHeapPull(heap,&x) ){
+ if( (prev&0xffff)>=(x>>16) ){
+ checkAppendMsg(pCheck,
+ "Multiple uses for byte %u of page %d", x>>16, iPage);
+ break;
+ }else{
+ nFrag += (x>>16) - (prev&0xffff) - 1;
+ prev = x;
+ }
+ }
+ nFrag += usableSize - (prev&0xffff) - 1;
+ /* EVIDENCE-OF: R-43263-13491 The total number of bytes in all fragments
+ ** is stored in the fifth field of the b-tree page header.
+ ** EVIDENCE-OF: R-07161-27322 The one-byte integer at offset 7 gives the
+ ** number of fragmented free bytes within the cell content area.
+ */
+ if( heap[0]==0 && nFrag!=data[hdr+7] ){
+ checkAppendMsg(pCheck,
+ "Fragmentation of %d bytes reported as %d on page %d",
+ nFrag, data[hdr+7], iPage);
+ }
+ }
+
+end_of_check:
+ if( !doCoverageCheck ) pPage->isInit = savedIsInit;
+ releasePage(pPage);
+ pCheck->zPfx = saved_zPfx;
+ pCheck->v1 = saved_v1;
+ pCheck->v2 = saved_v2;
+ return depth+1;
+}
+#endif /* SQLITE_OMIT_INTEGRITY_CHECK */
+
+#ifndef SQLITE_OMIT_INTEGRITY_CHECK
+/*
+** This routine does a complete check of the given BTree file. aRoot[] is
+** an array of page numbers where each page number is the root page of
+** a table. nRoot is the number of entries in aRoot.
+**
+** A read-only or read-write transaction must be opened before calling
+** this function.
+**
+** Write the number of errors seen in *pnErr. Except for some memory
+** allocation errors, an error message held in memory obtained from
+** malloc is returned if *pnErr is non-zero. If *pnErr==0 then NULL is
+** returned. If a memory allocation error occurs, NULL is returned.
+*/
+SQLITE_PRIVATE char *sqlite3BtreeIntegrityCheck(
+ Btree *p, /* The btree to be checked */
+ int *aRoot, /* An array of root page numbers for individual trees */
+ int nRoot, /* Number of entries in aRoot[] */
+ int mxErr, /* Stop reporting errors after this many */
+ int *pnErr /* Write number of errors seen to this variable */
+){
+ Pgno i;
+ IntegrityCk sCheck;
+ BtShared *pBt = p->pBt;
+ int savedDbFlags = pBt->db->flags;
+ char zErr[100];
+ VVA_ONLY( int nRef );
+
+ sqlite3BtreeEnter(p);
+ assert( p->inTrans>TRANS_NONE && pBt->inTransaction>TRANS_NONE );
+ VVA_ONLY( nRef = sqlite3PagerRefcount(pBt->pPager) );
+ assert( nRef>=0 );
+ sCheck.pBt = pBt;
+ sCheck.pPager = pBt->pPager;
+ sCheck.nPage = btreePagecount(sCheck.pBt);
+ sCheck.mxErr = mxErr;
+ sCheck.nErr = 0;
+ sCheck.mallocFailed = 0;
+ sCheck.zPfx = 0;
+ sCheck.v1 = 0;
+ sCheck.v2 = 0;
+ sCheck.aPgRef = 0;
+ sCheck.heap = 0;
+ sqlite3StrAccumInit(&sCheck.errMsg, 0, zErr, sizeof(zErr), SQLITE_MAX_LENGTH);
+ sCheck.errMsg.printfFlags = SQLITE_PRINTF_INTERNAL;
+ if( sCheck.nPage==0 ){
+ goto integrity_ck_cleanup;
+ }
+
+ sCheck.aPgRef = sqlite3MallocZero((sCheck.nPage / 8)+ 1);
+ if( !sCheck.aPgRef ){
+ sCheck.mallocFailed = 1;
+ goto integrity_ck_cleanup;
+ }
+ sCheck.heap = (u32*)sqlite3PageMalloc( pBt->pageSize );
+ if( sCheck.heap==0 ){
+ sCheck.mallocFailed = 1;
+ goto integrity_ck_cleanup;
+ }
+
+ i = PENDING_BYTE_PAGE(pBt);
+ if( i<=sCheck.nPage ) setPageReferenced(&sCheck, i);
+
+ /* Check the integrity of the freelist
+ */
+ sCheck.zPfx = "Main freelist: ";
+ checkList(&sCheck, 1, get4byte(&pBt->pPage1->aData[32]),
+ get4byte(&pBt->pPage1->aData[36]));
+ sCheck.zPfx = 0;
+
+ /* Check all the tables.
+ */
+ testcase( pBt->db->flags & SQLITE_CellSizeCk );
+ pBt->db->flags &= ~SQLITE_CellSizeCk;
+ for(i=0; (int)i<nRoot && sCheck.mxErr; i++){
+ i64 notUsed;
+ if( aRoot[i]==0 ) continue;
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ if( pBt->autoVacuum && aRoot[i]>1 ){
+ checkPtrmap(&sCheck, aRoot[i], PTRMAP_ROOTPAGE, 0);
+ }
+#endif
+ checkTreePage(&sCheck, aRoot[i], &notUsed, LARGEST_INT64);
+ }
+ pBt->db->flags = savedDbFlags;
+
+ /* Make sure every page in the file is referenced
+ */
+ for(i=1; i<=sCheck.nPage && sCheck.mxErr; i++){
+#ifdef SQLITE_OMIT_AUTOVACUUM
+ if( getPageReferenced(&sCheck, i)==0 ){
+ checkAppendMsg(&sCheck, "Page %d is never used", i);
+ }
+#else
+ /* If the database supports auto-vacuum, make sure no tables contain
+ ** references to pointer-map pages.
+ */
+ if( getPageReferenced(&sCheck, i)==0 &&
+ (PTRMAP_PAGENO(pBt, i)!=i || !pBt->autoVacuum) ){
+ checkAppendMsg(&sCheck, "Page %d is never used", i);
+ }
+ if( getPageReferenced(&sCheck, i)!=0 &&
+ (PTRMAP_PAGENO(pBt, i)==i && pBt->autoVacuum) ){
+ checkAppendMsg(&sCheck, "Pointer map page %d is referenced", i);
+ }
+#endif
+ }
+
+ /* Clean up and report errors.
+ */
+integrity_ck_cleanup:
+ sqlite3PageFree(sCheck.heap);
+ sqlite3_free(sCheck.aPgRef);
+ if( sCheck.mallocFailed ){
+ sqlite3StrAccumReset(&sCheck.errMsg);
+ sCheck.nErr++;
+ }
+ *pnErr = sCheck.nErr;
+ if( sCheck.nErr==0 ) sqlite3StrAccumReset(&sCheck.errMsg);
+ /* Make sure this analysis did not leave any unref() pages. */
+ assert( nRef==sqlite3PagerRefcount(pBt->pPager) );
+ sqlite3BtreeLeave(p);
+ return sqlite3StrAccumFinish(&sCheck.errMsg);
+}
+#endif /* SQLITE_OMIT_INTEGRITY_CHECK */
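+
+/* A minimal usage sketch (roughly what "PRAGMA integrity_check" does for
+** each attached database; aRoot[]/nRoot are assumed to have been collected
+** from the schema, and a read transaction is assumed to be open):
+**
+**   int nErr = 0;
+**   char *zReport = sqlite3BtreeIntegrityCheck(p, aRoot, nRoot, 100, &nErr);
+**   ...                 report zReport (a '\n'-separated list) if nErr>0
+**   sqlite3_free(zReport);
+*/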
+
+/*
+** Return the full pathname of the underlying database file. Return
+** an empty string if the database is in-memory or a TEMP database.
+**
+** The pager filename is invariant as long as the pager is
+** open so it is safe to access without the BtShared mutex.
+*/
+SQLITE_PRIVATE const char *sqlite3BtreeGetFilename(Btree *p){
+ assert( p->pBt->pPager!=0 );
+ return sqlite3PagerFilename(p->pBt->pPager, 1);
+}
+
+/*
+** Return the pathname of the journal file for this database. The return
+** value of this routine is the same regardless of whether the journal file
+** has been created or not.
+**
+** The pager journal filename is invariant as long as the pager is
+** open so it is safe to access without the BtShared mutex.
+*/
+SQLITE_PRIVATE const char *sqlite3BtreeGetJournalname(Btree *p){
+ assert( p->pBt->pPager!=0 );
+ return sqlite3PagerJournalname(p->pBt->pPager);
+}
+
+/*
+** Return non-zero if a transaction is active.
+*/
+SQLITE_PRIVATE int sqlite3BtreeIsInTrans(Btree *p){
+ assert( p==0 || sqlite3_mutex_held(p->db->mutex) );
+ return (p && (p->inTrans==TRANS_WRITE));
+}
+
+#ifndef SQLITE_OMIT_WAL
+/*
+** Run a checkpoint on the Btree passed as the first argument.
+**
+** Return SQLITE_LOCKED if this or any other connection has an open
+** transaction on the shared-cache the argument Btree is connected to.
+**
+** Parameter eMode is one of SQLITE_CHECKPOINT_PASSIVE, FULL or RESTART.
+*/
+SQLITE_PRIVATE int sqlite3BtreeCheckpoint(Btree *p, int eMode, int *pnLog, int *pnCkpt){
+ int rc = SQLITE_OK;
+ if( p ){
+ BtShared *pBt = p->pBt;
+ sqlite3BtreeEnter(p);
+ if( pBt->inTransaction!=TRANS_NONE ){
+ rc = SQLITE_LOCKED;
+ }else{
+ rc = sqlite3PagerCheckpoint(pBt->pPager, p->db, eMode, pnLog, pnCkpt);
+ }
+ sqlite3BtreeLeave(p);
+ }
+ return rc;
+}
+#endif
+
+/*
+** Return non-zero if a read (or write) transaction is active.
+*/
+SQLITE_PRIVATE int sqlite3BtreeIsInReadTrans(Btree *p){
+ assert( p );
+ assert( sqlite3_mutex_held(p->db->mutex) );
+ return p->inTrans!=TRANS_NONE;
+}
+
+SQLITE_PRIVATE int sqlite3BtreeIsInBackup(Btree *p){
+ assert( p );
+ assert( sqlite3_mutex_held(p->db->mutex) );
+ return p->nBackup!=0;
+}
+
+/*
+** This function returns a pointer to a blob of memory associated with
+** a single shared-btree. The memory is used by client code for its own
+** purposes (for example, to store a high-level schema associated with
+** the shared-btree). The btree layer manages reference counting issues.
+**
+** The first time this is called on a shared-btree, nBytes bytes of memory
+** are allocated, zeroed, and returned to the caller. For each subsequent
+** call the nBytes parameter is ignored and a pointer to the same blob
+** of memory is returned.
+**
+** If the nBytes parameter is 0 and the blob of memory has not yet been
+** allocated, a null pointer is returned. If the blob has already been
+** allocated, it is returned as normal.
+**
+** Just before the shared-btree is closed, the function passed as the
+** xFree argument when the memory allocation was made is invoked on the
+** blob of allocated memory. The xFree function should not call sqlite3_free()
+** on the memory; the btree layer does that.
+*/
+SQLITE_PRIVATE void *sqlite3BtreeSchema(Btree *p, int nBytes, void(*xFree)(void *)){
+ BtShared *pBt = p->pBt;
+ sqlite3BtreeEnter(p);
+ if( !pBt->pSchema && nBytes ){
+ pBt->pSchema = sqlite3DbMallocZero(0, nBytes);
+ pBt->xFreeSchema = xFree;
+ }
+ sqlite3BtreeLeave(p);
+ return pBt->pSchema;
+}
+
+/*
+** Return SQLITE_LOCKED_SHAREDCACHE if another user of the same shared
+** btree as the argument handle holds an exclusive lock on the
+** sqlite_master table. Otherwise SQLITE_OK.
+*/
+SQLITE_PRIVATE int sqlite3BtreeSchemaLocked(Btree *p){
+ int rc;
+ assert( sqlite3_mutex_held(p->db->mutex) );
+ sqlite3BtreeEnter(p);
+ rc = querySharedCacheTableLock(p, MASTER_ROOT, READ_LOCK);
+ assert( rc==SQLITE_OK || rc==SQLITE_LOCKED_SHAREDCACHE );
+ sqlite3BtreeLeave(p);
+ return rc;
+}
+
+
+#ifndef SQLITE_OMIT_SHARED_CACHE
+/*
+** Obtain a lock on the table whose root page is iTab. The
+** lock is a write lock if isWritelock is true or a read lock
+** if it is false.
+*/
+SQLITE_PRIVATE int sqlite3BtreeLockTable(Btree *p, int iTab, u8 isWriteLock){
+ int rc = SQLITE_OK;
+ assert( p->inTrans!=TRANS_NONE );
+ if( p->sharable ){
+ u8 lockType = READ_LOCK + isWriteLock;
+ assert( READ_LOCK+1==WRITE_LOCK );
+ assert( isWriteLock==0 || isWriteLock==1 );
+
+ sqlite3BtreeEnter(p);
+ rc = querySharedCacheTableLock(p, iTab, lockType);
+ if( rc==SQLITE_OK ){
+ rc = setSharedCacheTableLock(p, iTab, lockType);
+ }
+ sqlite3BtreeLeave(p);
+ }
+ return rc;
+}
+#endif
+
+#ifndef SQLITE_OMIT_INCRBLOB
+/*
+** Argument pCsr must be a cursor opened for writing on an
+** INTKEY table currently pointing at a valid table entry.
+** This function modifies the data stored as part of that entry.
+**
+** Only the data content may be modified; it is not possible to
+** change the length of the data stored. If this function is called with
+** parameters that attempt to write past the end of the existing data,
+** no modifications are made and SQLITE_CORRUPT is returned.
+*/
+SQLITE_PRIVATE int sqlite3BtreePutData(BtCursor *pCsr, u32 offset, u32 amt, void *z){
+ int rc;
+ assert( cursorOwnsBtShared(pCsr) );
+ assert( sqlite3_mutex_held(pCsr->pBtree->db->mutex) );
+ assert( pCsr->curFlags & BTCF_Incrblob );
+
+ rc = restoreCursorPosition(pCsr);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ assert( pCsr->eState!=CURSOR_REQUIRESEEK );
+ if( pCsr->eState!=CURSOR_VALID ){
+ return SQLITE_ABORT;
+ }
+
+ /* Save the positions of all other cursors open on this table. This is
+ ** required in case any of them are holding references to an xFetch
+ ** version of the b-tree page modified by the accessPayload call below.
+ **
+ ** Note that pCsr must be open on an INTKEY table and saveCursorPosition()
+ ** and hence saveAllCursors() cannot fail on a BTREE_INTKEY table, so
+ ** saveAllCursors() can only return SQLITE_OK here.
+ */
+ VVA_ONLY(rc =) saveAllCursors(pCsr->pBt, pCsr->pgnoRoot, pCsr);
+ assert( rc==SQLITE_OK );
+
+ /* Check some assumptions:
+ ** (a) the cursor is open for writing,
+ ** (b) there is a read/write transaction open,
+ ** (c) the connection holds a write-lock on the table (if required),
+ ** (d) there are no conflicting read-locks, and
+ ** (e) the cursor points at a valid row of an intKey table.
+ */
+ if( (pCsr->curFlags & BTCF_WriteFlag)==0 ){
+ return SQLITE_READONLY;
+ }
+ assert( (pCsr->pBt->btsFlags & BTS_READ_ONLY)==0
+ && pCsr->pBt->inTransaction==TRANS_WRITE );
+ assert( hasSharedCacheTableLock(pCsr->pBtree, pCsr->pgnoRoot, 0, 2) );
+ assert( !hasReadConflicts(pCsr->pBtree, pCsr->pgnoRoot) );
+ assert( pCsr->apPage[pCsr->iPage]->intKey );
+
+ return accessPayload(pCsr, offset, amt, (unsigned char *)z, 1);
+}
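+
+/* This routine sits underneath the public incremental-blob interface. A
+** minimal sketch of that public API, assuming a table "t1" with a blob
+** column "content" (iRow, zData and nData stand for the target rowid,
+** the source buffer and its size; error handling abbreviated):
+**
+**   sqlite3_blob *pBlob;
+**   rc = sqlite3_blob_open(db, "main", "t1", "content", iRow, 1, &pBlob);
+**   if( rc==SQLITE_OK ){
+**     rc = sqlite3_blob_write(pBlob, zData, nData, 0);
+**     rc = sqlite3_blob_close(pBlob);
+**   }
+**
+** As noted above, such writes may overwrite existing content but can
+** never change the length of the stored blob.
+*/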
+
+/*
+** Mark this cursor as an incremental blob cursor.
+*/
+SQLITE_PRIVATE void sqlite3BtreeIncrblobCursor(BtCursor *pCur){
+ pCur->curFlags |= BTCF_Incrblob;
+ pCur->pBtree->hasIncrblobCur = 1;
+}
+#endif
+
+/*
+** Set both the "read version" (single byte at byte offset 18) and
+** "write version" (single byte at byte offset 19) fields in the database
+** header to iVersion.
+*/
+SQLITE_PRIVATE int sqlite3BtreeSetVersion(Btree *pBtree, int iVersion){
+ BtShared *pBt = pBtree->pBt;
+ int rc; /* Return code */
+
+ assert( iVersion==1 || iVersion==2 );
+
+ /* If setting the version fields to 1, do not automatically open the
+ ** WAL connection, even if the version fields are currently set to 2.
+ */
+ pBt->btsFlags &= ~BTS_NO_WAL;
+ if( iVersion==1 ) pBt->btsFlags |= BTS_NO_WAL;
+
+ rc = sqlite3BtreeBeginTrans(pBtree, 0);
+ if( rc==SQLITE_OK ){
+ u8 *aData = pBt->pPage1->aData;
+ if( aData[18]!=(u8)iVersion || aData[19]!=(u8)iVersion ){
+ rc = sqlite3BtreeBeginTrans(pBtree, 2);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3PagerWrite(pBt->pPage1->pDbPage);
+ if( rc==SQLITE_OK ){
+ aData[18] = (u8)iVersion;
+ aData[19] = (u8)iVersion;
+ }
+ }
+ }
+ }
+
+ pBt->btsFlags &= ~BTS_NO_WAL;
+ return rc;
+}
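+
+/* For example, the journal-mode logic sets the version fields to 2 when a
+** database is switched into WAL mode and back to 1 when it leaves WAL mode
+** (a sketch of the former):
+**
+**   rc = sqlite3BtreeSetVersion(pBtree, 2);
+*/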
+
+/*
+** Return true if the cursor has a hint specified. This routine is
+** only used from within assert() statements
+*/
+SQLITE_PRIVATE int sqlite3BtreeCursorHasHint(BtCursor *pCsr, unsigned int mask){
+ return (pCsr->hints & mask)!=0;
+}
+
+/*
+** Return true if the given Btree is read-only.
+*/
+SQLITE_PRIVATE int sqlite3BtreeIsReadonly(Btree *p){
+ return (p->pBt->btsFlags & BTS_READ_ONLY)!=0;
+}
+
+/*
+** Return the size of the header added to each page by this module.
+*/
+SQLITE_PRIVATE int sqlite3HeaderSizeBtree(void){ return ROUND8(sizeof(MemPage)); }
+
+#if !defined(SQLITE_OMIT_SHARED_CACHE)
+/*
+** Return true if the Btree passed as the only argument is sharable.
+*/
+SQLITE_PRIVATE int sqlite3BtreeSharable(Btree *p){
+ return p->sharable;
+}
+
+/*
+** Return the number of connections to the BtShared object accessed by
+** the Btree handle passed as the only argument. For private caches
+** this is always 1. For shared caches it may be 1 or greater.
+*/
+SQLITE_PRIVATE int sqlite3BtreeConnectionCount(Btree *p){
+ testcase( p->sharable );
+ return p->pBt->nRef;
+}
+#endif
+
+/************** End of btree.c ***********************************************/
+/************** Begin file backup.c ******************************************/
+/*
+** 2009 January 28
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains the implementation of the sqlite3_backup_XXX()
+** API functions and the related features.
+*/
+/* #include "sqliteInt.h" */
+/* #include "btreeInt.h" */
+
+/*
+** Structure allocated for each backup operation.
+*/
+struct sqlite3_backup {
+ sqlite3* pDestDb; /* Destination database handle */
+ Btree *pDest; /* Destination b-tree file */
+ u32 iDestSchema; /* Original schema cookie in destination */
+ int bDestLocked; /* True once a write-transaction is open on pDest */
+
+ Pgno iNext; /* Page number of the next source page to copy */
+ sqlite3* pSrcDb; /* Source database handle */
+ Btree *pSrc; /* Source b-tree file */
+
+ int rc; /* Backup process error code */
+
+ /* These two variables are set by every call to backup_step(). They are
+ ** read by calls to backup_remaining() and backup_pagecount().
+ */
+ Pgno nRemaining; /* Number of pages left to copy */
+ Pgno nPagecount; /* Total number of pages to copy */
+
+ int isAttached; /* True once backup has been registered with pager */
+ sqlite3_backup *pNext; /* Next backup associated with source pager */
+};
+
+/*
+** THREAD SAFETY NOTES:
+**
+** Once it has been created using backup_init(), a single sqlite3_backup
+** structure may be accessed via two groups of thread-safe entry points:
+**
+** * Via the sqlite3_backup_XXX() API functions backup_step() and
+** backup_finish(). Both these functions obtain the source database
+** handle mutex and the mutex associated with the source BtShared
+** structure, in that order.
+**
+** * Via the BackupUpdate() and BackupRestart() functions, which are
+** invoked by the pager layer to report various state changes in
+** the page cache associated with the source database. The mutex
+** associated with the source database BtShared structure will always
+** be held when either of these functions are invoked.
+**
+** The other sqlite3_backup_XXX() API functions, backup_remaining() and
+** backup_pagecount(), are not thread-safe. If they are called
+** while some other thread is calling backup_step() or backup_finish(),
+** the values returned may be invalid. There is no way for a call to
+** BackupUpdate() or BackupRestart() to interfere with backup_remaining()
+** or backup_pagecount().
+**
+** Depending on the SQLite configuration, the database handles and/or
+** the Btree objects may have their own mutexes that require locking.
+** Non-sharable Btrees (in-memory databases for example), do not have
+** associated mutexes.
+*/
+
+/*
+** Return a pointer corresponding to database zDb (i.e. "main", "temp")
+** in connection handle pDb. If such a database cannot be found, return
+** a NULL pointer and write an error message to pErrorDb.
+**
+** If the "temp" database is requested, it may need to be opened by this
+** function. If an error occurs while doing so, return 0 and write an
+** error message to pErrorDb.
+*/
+static Btree *findBtree(sqlite3 *pErrorDb, sqlite3 *pDb, const char *zDb){
+ int i = sqlite3FindDbName(pDb, zDb);
+
+ if( i==1 ){
+ Parse sParse;
+ int rc = 0;
+ memset(&sParse, 0, sizeof(sParse));
+ sParse.db = pDb;
+ if( sqlite3OpenTempDatabase(&sParse) ){
+ sqlite3ErrorWithMsg(pErrorDb, sParse.rc, "%s", sParse.zErrMsg);
+ rc = SQLITE_ERROR;
+ }
+ sqlite3DbFree(pErrorDb, sParse.zErrMsg);
+ sqlite3ParserReset(&sParse);
+ if( rc ){
+ return 0;
+ }
+ }
+
+ if( i<0 ){
+ sqlite3ErrorWithMsg(pErrorDb, SQLITE_ERROR, "unknown database %s", zDb);
+ return 0;
+ }
+
+ return pDb->aDb[i].pBt;
+}
+
+/*
+** Attempt to set the page size of the destination to match the page size
+** of the source.
+*/
+static int setDestPgsz(sqlite3_backup *p){
+ int rc;
+ rc = sqlite3BtreeSetPageSize(p->pDest,sqlite3BtreeGetPageSize(p->pSrc),-1,0);
+ return rc;
+}
+
+/*
+** Check that there is no open read-transaction on the b-tree passed as the
+** second argument. If there is not, return SQLITE_OK. Otherwise, if there
+** is an open read-transaction, return SQLITE_ERROR and leave an error
+** message in database handle db.
+*/
+static int checkReadTransaction(sqlite3 *db, Btree *p){
+ if( sqlite3BtreeIsInReadTrans(p) ){
+ sqlite3ErrorWithMsg(db, SQLITE_ERROR, "destination database is in use");
+ return SQLITE_ERROR;
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Create an sqlite3_backup process to copy the contents of zSrcDb from
+** connection handle pSrcDb to zDestDb in pDestDb. If successful, return
+** a pointer to the new sqlite3_backup object.
+**
+** If an error occurs, NULL is returned and an error code and error message
+** are stored in database handle pDestDb.
+*/
+SQLITE_API sqlite3_backup *sqlite3_backup_init(
+ sqlite3* pDestDb, /* Database to write to */
+ const char *zDestDb, /* Name of database within pDestDb */
+ sqlite3* pSrcDb, /* Database connection to read from */
+ const char *zSrcDb /* Name of database within pSrcDb */
+){
+ sqlite3_backup *p; /* Value to return */
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(pSrcDb)||!sqlite3SafetyCheckOk(pDestDb) ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+
+ /* Lock the source database handle. The destination database
+ ** handle is not locked in this routine, but it is locked in
+ ** sqlite3_backup_step(). The user is required to ensure that no
+ ** other thread accesses the destination handle for the duration
+ ** of the backup operation. Any attempt to use the destination
+ ** database connection while a backup is in progress may cause
+ ** a malfunction or a deadlock.
+ */
+ sqlite3_mutex_enter(pSrcDb->mutex);
+ sqlite3_mutex_enter(pDestDb->mutex);
+
+ if( pSrcDb==pDestDb ){
+ sqlite3ErrorWithMsg(
+ pDestDb, SQLITE_ERROR, "source and destination must be distinct"
+ );
+ p = 0;
+ }else {
+ /* Allocate space for a new sqlite3_backup object...
+ ** EVIDENCE-OF: R-64852-21591 The sqlite3_backup object is created by a
+ ** call to sqlite3_backup_init() and is destroyed by a call to
+ ** sqlite3_backup_finish(). */
+ p = (sqlite3_backup *)sqlite3MallocZero(sizeof(sqlite3_backup));
+ if( !p ){
+ sqlite3Error(pDestDb, SQLITE_NOMEM_BKPT);
+ }
+ }
+
+ /* If the allocation succeeded, populate the new object. */
+ if( p ){
+ p->pSrc = findBtree(pDestDb, pSrcDb, zSrcDb);
+ p->pDest = findBtree(pDestDb, pDestDb, zDestDb);
+ p->pDestDb = pDestDb;
+ p->pSrcDb = pSrcDb;
+ p->iNext = 1;
+ p->isAttached = 0;
+
+ if( 0==p->pSrc || 0==p->pDest
+ || checkReadTransaction(pDestDb, p->pDest)!=SQLITE_OK
+ ){
+ /* One (or both) of the named databases did not exist or an OOM
+ ** error was hit. Or there is a transaction open on the destination
+ ** database. The error has already been written into the pDestDb
+ ** handle. All that is left to do here is free the sqlite3_backup
+ ** structure. */
+ sqlite3_free(p);
+ p = 0;
+ }
+ }
+ if( p ){
+ p->pSrc->nBackup++;
+ }
+
+ sqlite3_mutex_leave(pDestDb->mutex);
+ sqlite3_mutex_leave(pSrcDb->mutex);
+ return p;
+}
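+
+/* A minimal end-to-end usage sketch of the API implemented in this file
+** (pSrc and pDest are assumed to be open database handles; error handling
+** abbreviated):
+**
+**   sqlite3_backup *pBackup;
+**   pBackup = sqlite3_backup_init(pDest, "main", pSrc, "main");
+**   if( pBackup ){
+**     (void)sqlite3_backup_step(pBackup, -1);
+**     (void)sqlite3_backup_finish(pBackup);
+**   }
+**   rc = sqlite3_errcode(pDest);
+**
+** Passing a negative nPage to backup_step() copies all remaining pages in
+** a single call.
+*/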
+
+/*
+** Argument rc is an SQLite error code. Return true if this error is
+** considered fatal if encountered during a backup operation. All errors
+** are considered fatal except for SQLITE_BUSY and SQLITE_LOCKED.
+*/
+static int isFatalError(int rc){
+ return (rc!=SQLITE_OK && rc!=SQLITE_BUSY && ALWAYS(rc!=SQLITE_LOCKED));
+}
+
+/*
+** Parameter zSrcData points to a buffer containing the data for
+** page iSrcPg from the source database. Copy this data into the
+** destination database.
+*/
+static int backupOnePage(
+ sqlite3_backup *p, /* Backup handle */
+ Pgno iSrcPg, /* Source database page to backup */
+ const u8 *zSrcData, /* Source database page data */
+ int bUpdate /* True for an update, false otherwise */
+){
+ Pager * const pDestPager = sqlite3BtreePager(p->pDest);
+ const int nSrcPgsz = sqlite3BtreeGetPageSize(p->pSrc);
+ int nDestPgsz = sqlite3BtreeGetPageSize(p->pDest);
+ const int nCopy = MIN(nSrcPgsz, nDestPgsz);
+ const i64 iEnd = (i64)iSrcPg*(i64)nSrcPgsz;
+#ifdef SQLITE_HAS_CODEC
+ /* Use BtreeGetReserveNoMutex() for the source b-tree, as although it is
+ ** guaranteed that the shared-mutex is held by this thread, handle
+ ** p->pSrc may not actually be the owner. */
+ int nSrcReserve = sqlite3BtreeGetReserveNoMutex(p->pSrc);
+ int nDestReserve = sqlite3BtreeGetOptimalReserve(p->pDest);
+#endif
+ int rc = SQLITE_OK;
+ i64 iOff;
+
+ assert( sqlite3BtreeGetReserveNoMutex(p->pSrc)>=0 );
+ assert( p->bDestLocked );
+ assert( !isFatalError(p->rc) );
+ assert( iSrcPg!=PENDING_BYTE_PAGE(p->pSrc->pBt) );
+ assert( zSrcData );
+
+ /* Catch the case where the destination is an in-memory database and the
+ ** page sizes of the source and destination differ.
+ */
+ if( nSrcPgsz!=nDestPgsz && sqlite3PagerIsMemdb(pDestPager) ){
+ rc = SQLITE_READONLY;
+ }
+
+#ifdef SQLITE_HAS_CODEC
+ /* Backup is not possible if the page size of the destination is changing
+ ** and a codec is in use.
+ */
+ if( nSrcPgsz!=nDestPgsz && sqlite3PagerGetCodec(pDestPager)!=0 ){
+ rc = SQLITE_READONLY;
+ }
+
+ /* Backup is not possible if the number of bytes of reserve space differ
+ ** between source and destination. If there is a difference, try to
+ ** fix the destination to agree with the source. If that is not possible,
+ ** then the backup cannot proceed.
+ */
+ if( nSrcReserve!=nDestReserve ){
+ u32 newPgsz = nSrcPgsz;
+ rc = sqlite3PagerSetPagesize(pDestPager, &newPgsz, nSrcReserve);
+ if( rc==SQLITE_OK && newPgsz!=nSrcPgsz ) rc = SQLITE_READONLY;
+ }
+#endif
+
+ /* This loop runs once for each destination page spanned by the source
+ ** page. For each iteration, variable iOff is set to the byte offset
+ ** of the destination page.
+ */
+ for(iOff=iEnd-(i64)nSrcPgsz; rc==SQLITE_OK && iOff<iEnd; iOff+=nDestPgsz){
+ DbPage *pDestPg = 0;
+ Pgno iDest = (Pgno)(iOff/nDestPgsz)+1;
+ if( iDest==PENDING_BYTE_PAGE(p->pDest->pBt) ) continue;
+ if( SQLITE_OK==(rc = sqlite3PagerGet(pDestPager, iDest, &pDestPg, 0))
+ && SQLITE_OK==(rc = sqlite3PagerWrite(pDestPg))
+ ){
+ const u8 *zIn = &zSrcData[iOff%nSrcPgsz];
+ u8 *zDestData = sqlite3PagerGetData(pDestPg);
+ u8 *zOut = &zDestData[iOff%nDestPgsz];
+
+ /* Copy the data from the source page into the destination page.
+ ** Then clear the Btree layer MemPage.isInit flag. Both this module
+ ** and the pager code use this trick (clearing the first byte
+ ** of the page 'extra' space to invalidate the Btree layer's
+ ** cached parse of the page). MemPage.isInit is marked
+ ** "MUST BE FIRST" for this purpose.
+ */
+ memcpy(zOut, zIn, nCopy);
+ ((u8 *)sqlite3PagerGetExtra(pDestPg))[0] = 0;
+ if( iOff==0 && bUpdate==0 ){
+ sqlite3Put4byte(&zOut[28], sqlite3BtreeLastPage(p->pSrc));
+ }
+ }
+ sqlite3PagerUnref(pDestPg);
+ }
+
+ return rc;
+}
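+
+/* For example, if the source page size is 4096 bytes and the destination
+** page size is 1024 bytes, then source page 2 holds byte offsets
+** 4096..8191 of the database image, and the loop above writes destination
+** pages 5 through 8 (iDest = iOff/1024 + 1 for iOff = 4096, 5120, 6144
+** and 7168), copying nCopy==1024 bytes into each.
+*/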
+
+/*
+** If pFile is currently larger than iSize bytes, then truncate it to
+** exactly iSize bytes. If pFile is not larger than iSize bytes, then
+** this function is a no-op.
+**
+** Return SQLITE_OK if everything is successful, or an SQLite error
+** code if an error occurs.
+*/
+static int backupTruncateFile(sqlite3_file *pFile, i64 iSize){
+ i64 iCurrent;
+ int rc = sqlite3OsFileSize(pFile, &iCurrent);
+ if( rc==SQLITE_OK && iCurrent>iSize ){
+ rc = sqlite3OsTruncate(pFile, iSize);
+ }
+ return rc;
+}
+
+/*
+** Register this backup object with the associated source pager for
+** callbacks when pages are changed or the cache invalidated.
+*/
+static void attachBackupObject(sqlite3_backup *p){
+ sqlite3_backup **pp;
+ assert( sqlite3BtreeHoldsMutex(p->pSrc) );
+ pp = sqlite3PagerBackupPtr(sqlite3BtreePager(p->pSrc));
+ p->pNext = *pp;
+ *pp = p;
+ p->isAttached = 1;
+}
+
+/*
+** Copy nPage pages from the source b-tree to the destination.
+*/
+SQLITE_API int sqlite3_backup_step(sqlite3_backup *p, int nPage){
+ int rc;
+ int destMode; /* Destination journal mode */
+ int pgszSrc = 0; /* Source page size */
+ int pgszDest = 0; /* Destination page size */
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( p==0 ) return SQLITE_MISUSE_BKPT;
+#endif
+ sqlite3_mutex_enter(p->pSrcDb->mutex);
+ sqlite3BtreeEnter(p->pSrc);
+ if( p->pDestDb ){
+ sqlite3_mutex_enter(p->pDestDb->mutex);
+ }
+
+ rc = p->rc;
+ if( !isFatalError(rc) ){
+ Pager * const pSrcPager = sqlite3BtreePager(p->pSrc); /* Source pager */
+ Pager * const pDestPager = sqlite3BtreePager(p->pDest); /* Dest pager */
+ int ii; /* Iterator variable */
+ int nSrcPage = -1; /* Size of source db in pages */
+ int bCloseTrans = 0; /* True if src db requires unlocking */
+
+ /* If the source pager is currently in a write-transaction, return
+ ** SQLITE_BUSY immediately.
+ */
+ if( p->pDestDb && p->pSrc->pBt->inTransaction==TRANS_WRITE ){
+ rc = SQLITE_BUSY;
+ }else{
+ rc = SQLITE_OK;
+ }
+
+ /* If there is no open read-transaction on the source database, open
+ ** one now. If a transaction is opened here, then it will be closed
+ ** before this function exits.
+ */
+ if( rc==SQLITE_OK && 0==sqlite3BtreeIsInReadTrans(p->pSrc) ){
+ rc = sqlite3BtreeBeginTrans(p->pSrc, 0);
+ bCloseTrans = 1;
+ }
+
+ /* If the destination database has not yet been locked (i.e. if this
+ ** is the first call to backup_step() for the current backup operation),
+ ** try to set its page size to the same as the source database. This
+ ** is especially important on ZipVFS systems, as in that case it is
+ ** not possible to create a database file that uses one page size by
+ ** writing to it with another. */
+ if( p->bDestLocked==0 && rc==SQLITE_OK && setDestPgsz(p)==SQLITE_NOMEM ){
+ rc = SQLITE_NOMEM;
+ }
+
+ /* Lock the destination database, if it is not locked already. */
+ if( SQLITE_OK==rc && p->bDestLocked==0
+ && SQLITE_OK==(rc = sqlite3BtreeBeginTrans(p->pDest, 2))
+ ){
+ p->bDestLocked = 1;
+ sqlite3BtreeGetMeta(p->pDest, BTREE_SCHEMA_VERSION, &p->iDestSchema);
+ }
+
+ /* Do not allow backup if the destination database is in WAL mode
+ ** and the page sizes are different between source and destination */
+ pgszSrc = sqlite3BtreeGetPageSize(p->pSrc);
+ pgszDest = sqlite3BtreeGetPageSize(p->pDest);
+ destMode = sqlite3PagerGetJournalMode(sqlite3BtreePager(p->pDest));
+ if( SQLITE_OK==rc && destMode==PAGER_JOURNALMODE_WAL && pgszSrc!=pgszDest ){
+ rc = SQLITE_READONLY;
+ }
+
+ /* Now that there is a read-lock on the source database, query the
+ ** source pager for the number of pages in the database.
+ */
+ nSrcPage = (int)sqlite3BtreeLastPage(p->pSrc);
+ assert( nSrcPage>=0 );
+ for(ii=0; (nPage<0 || ii<nPage) && p->iNext<=(Pgno)nSrcPage && !rc; ii++){
+ const Pgno iSrcPg = p->iNext; /* Source page number */
+ if( iSrcPg!=PENDING_BYTE_PAGE(p->pSrc->pBt) ){
+ DbPage *pSrcPg; /* Source page object */
+ rc = sqlite3PagerGet(pSrcPager, iSrcPg, &pSrcPg,PAGER_GET_READONLY);
+ if( rc==SQLITE_OK ){
+ rc = backupOnePage(p, iSrcPg, sqlite3PagerGetData(pSrcPg), 0);
+ sqlite3PagerUnref(pSrcPg);
+ }
+ }
+ p->iNext++;
+ }
+ if( rc==SQLITE_OK ){
+ p->nPagecount = nSrcPage;
+ p->nRemaining = nSrcPage+1-p->iNext;
+ if( p->iNext>(Pgno)nSrcPage ){
+ rc = SQLITE_DONE;
+ }else if( !p->isAttached ){
+ attachBackupObject(p);
+ }
+ }
+
+ /* Update the schema version field in the destination database. This
+ ** is to make sure that the schema-version really does change in
+ ** the case where the source and destination databases have the
+ ** same schema version.
+ */
+ if( rc==SQLITE_DONE ){
+ if( nSrcPage==0 ){
+ rc = sqlite3BtreeNewDb(p->pDest);
+ nSrcPage = 1;
+ }
+ if( rc==SQLITE_OK || rc==SQLITE_DONE ){
+ rc = sqlite3BtreeUpdateMeta(p->pDest,1,p->iDestSchema+1);
+ }
+ if( rc==SQLITE_OK ){
+ if( p->pDestDb ){
+ sqlite3ResetAllSchemasOfConnection(p->pDestDb);
+ }
+ if( destMode==PAGER_JOURNALMODE_WAL ){
+ rc = sqlite3BtreeSetVersion(p->pDest, 2);
+ }
+ }
+ if( rc==SQLITE_OK ){
+ int nDestTruncate;
+ /* Set nDestTruncate to the final number of pages in the destination
+ ** database. The complication here is that the destination page
+ ** size may be different to the source page size.
+ **
+ ** If the source page size is smaller than the destination page size,
+ ** round up. In this case the call to sqlite3OsTruncate() below will
+ ** fix the size of the file. However it is important to call
+ ** sqlite3PagerTruncateImage() here so that any pages in the
+ ** destination file that lie beyond the nDestTruncate page mark are
+ ** journalled by PagerCommitPhaseOne() before they are destroyed
+ ** by the file truncation.
+ */
+ assert( pgszSrc==sqlite3BtreeGetPageSize(p->pSrc) );
+ assert( pgszDest==sqlite3BtreeGetPageSize(p->pDest) );
+ if( pgszSrc<pgszDest ){
+ int ratio = pgszDest/pgszSrc;
+ nDestTruncate = (nSrcPage+ratio-1)/ratio;
+ if( nDestTruncate==(int)PENDING_BYTE_PAGE(p->pDest->pBt) ){
+ nDestTruncate--;
+ }
+ }else{
+ nDestTruncate = nSrcPage * (pgszSrc/pgszDest);
+ }
+ assert( nDestTruncate>0 );
+
+ if( pgszSrc<pgszDest ){
+ /* If the source page-size is smaller than the destination page-size,
+ ** two extra things may need to happen:
+ **
+ ** * The destination may need to be truncated, and
+ **
+ ** * Data stored on the pages immediately following the
+ ** pending-byte page in the source database may need to be
+ ** copied into the destination database.
+ */
+ const i64 iSize = (i64)pgszSrc * (i64)nSrcPage;
+ sqlite3_file * const pFile = sqlite3PagerFile(pDestPager);
+ Pgno iPg;
+ int nDstPage;
+ i64 iOff;
+ i64 iEnd;
+
+ assert( pFile );
+ assert( nDestTruncate==0
+ || (i64)nDestTruncate*(i64)pgszDest >= iSize || (
+ nDestTruncate==(int)(PENDING_BYTE_PAGE(p->pDest->pBt)-1)
+ && iSize>=PENDING_BYTE && iSize<=PENDING_BYTE+pgszDest
+ ));
+
+ /* This block ensures that all data required to recreate the original
+ ** database has been stored in the journal for pDestPager and the
+ ** journal synced to disk. So at this point we may safely modify
+ ** the database file in any way, knowing that if a power failure
+ ** occurs, the original database will be reconstructed from the
+ ** journal file. */
+ sqlite3PagerPagecount(pDestPager, &nDstPage);
+ for(iPg=nDestTruncate; rc==SQLITE_OK && iPg<=(Pgno)nDstPage; iPg++){
+ if( iPg!=PENDING_BYTE_PAGE(p->pDest->pBt) ){
+ DbPage *pPg;
+ rc = sqlite3PagerGet(pDestPager, iPg, &pPg, 0);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3PagerWrite(pPg);
+ sqlite3PagerUnref(pPg);
+ }
+ }
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3PagerCommitPhaseOne(pDestPager, 0, 1);
+ }
+
+ /* Write the extra pages and truncate the database file as required */
+ iEnd = MIN(PENDING_BYTE + pgszDest, iSize);
+ for(
+ iOff=PENDING_BYTE+pgszSrc;
+ rc==SQLITE_OK && iOff<iEnd;
+ iOff+=pgszSrc
+ ){
+ PgHdr *pSrcPg = 0;
+ const Pgno iSrcPg = (Pgno)((iOff/pgszSrc)+1);
+ rc = sqlite3PagerGet(pSrcPager, iSrcPg, &pSrcPg, 0);
+ if( rc==SQLITE_OK ){
+ u8 *zData = sqlite3PagerGetData(pSrcPg);
+ rc = sqlite3OsWrite(pFile, zData, pgszSrc, iOff);
+ }
+ sqlite3PagerUnref(pSrcPg);
+ }
+ if( rc==SQLITE_OK ){
+ rc = backupTruncateFile(pFile, iSize);
+ }
+
+ /* Sync the database file to disk. */
+ if( rc==SQLITE_OK ){
+ rc = sqlite3PagerSync(pDestPager, 0);
+ }
+ }else{
+ sqlite3PagerTruncateImage(pDestPager, nDestTruncate);
+ rc = sqlite3PagerCommitPhaseOne(pDestPager, 0, 0);
+ }
+
+ /* Finish committing the transaction to the destination database. */
+ if( SQLITE_OK==rc
+ && SQLITE_OK==(rc = sqlite3BtreeCommitPhaseTwo(p->pDest, 0))
+ ){
+ rc = SQLITE_DONE;
+ }
+ }
+ }
+
+ /* If bCloseTrans is true, then this function opened a read transaction
+ ** on the source database. Close the read transaction here. There is
+ ** no need to check the return values of the btree methods here, as
+ ** "committing" a read-only transaction cannot fail.
+ */
+ if( bCloseTrans ){
+ TESTONLY( int rc2 );
+ TESTONLY( rc2 = ) sqlite3BtreeCommitPhaseOne(p->pSrc, 0);
+ TESTONLY( rc2 |= ) sqlite3BtreeCommitPhaseTwo(p->pSrc, 0);
+ assert( rc2==SQLITE_OK );
+ }
+
+ if( rc==SQLITE_IOERR_NOMEM ){
+ rc = SQLITE_NOMEM_BKPT;
+ }
+ p->rc = rc;
+ }
+ if( p->pDestDb ){
+ sqlite3_mutex_leave(p->pDestDb->mutex);
+ }
+ sqlite3BtreeLeave(p->pSrc);
+ sqlite3_mutex_leave(p->pSrcDb->mutex);
+ return rc;
+}
+
+/*
+** Release all resources associated with an sqlite3_backup* handle.
+*/
+SQLITE_API int sqlite3_backup_finish(sqlite3_backup *p){
+ sqlite3_backup **pp; /* Ptr to head of pagers backup list */
+ sqlite3 *pSrcDb; /* Source database connection */
+ int rc; /* Value to return */
+
+ /* Enter the mutexes */
+ if( p==0 ) return SQLITE_OK;
+ pSrcDb = p->pSrcDb;
+ sqlite3_mutex_enter(pSrcDb->mutex);
+ sqlite3BtreeEnter(p->pSrc);
+ if( p->pDestDb ){
+ sqlite3_mutex_enter(p->pDestDb->mutex);
+ }
+
+ /* Detach this backup from the source pager. */
+ if( p->pDestDb ){
+ p->pSrc->nBackup--;
+ }
+ if( p->isAttached ){
+ pp = sqlite3PagerBackupPtr(sqlite3BtreePager(p->pSrc));
+ while( *pp!=p ){
+ pp = &(*pp)->pNext;
+ }
+ *pp = p->pNext;
+ }
+
+ /* If a transaction is still open on the Btree, roll it back. */
+ sqlite3BtreeRollback(p->pDest, SQLITE_OK, 0);
+
+ /* Set the error code of the destination database handle. */
+ rc = (p->rc==SQLITE_DONE) ? SQLITE_OK : p->rc;
+ if( p->pDestDb ){
+ sqlite3Error(p->pDestDb, rc);
+
+ /* Exit the mutexes and free the backup context structure. */
+ sqlite3LeaveMutexAndCloseZombie(p->pDestDb);
+ }
+ sqlite3BtreeLeave(p->pSrc);
+ if( p->pDestDb ){
+ /* EVIDENCE-OF: R-64852-21591 The sqlite3_backup object is created by a
+ ** call to sqlite3_backup_init() and is destroyed by a call to
+ ** sqlite3_backup_finish(). */
+ sqlite3_free(p);
+ }
+ sqlite3LeaveMutexAndCloseZombie(pSrcDb);
+ return rc;
+}
+
+/*
+** Return the number of pages still to be backed up as of the most recent
+** call to sqlite3_backup_step().
+*/
+SQLITE_API int sqlite3_backup_remaining(sqlite3_backup *p){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( p==0 ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+ return p->nRemaining;
+}
+
+/*
+** Return the total number of pages in the source database as of the most
+** recent call to sqlite3_backup_step().
+*/
+SQLITE_API int sqlite3_backup_pagecount(sqlite3_backup *p){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( p==0 ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+ return p->nPagecount;
+}
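+
+/*
+** As a small illustration, the two accessors above are typically combined
+** after each call to sqlite3_backup_step() to report progress (pBk is an
+** illustrative handle name):
+**
+**   int nTotal = sqlite3_backup_pagecount(pBk);
+**   int nDone  = nTotal - sqlite3_backup_remaining(pBk);
+**   double pct = nTotal ? (100.0*nDone)/nTotal : 0.0;
+*/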
+
+/*
+** This function is called after the contents of page iPage of the
+** source database have been modified. If page iPage has already been
+** copied into the destination database, then the data written to the
+** destination is now invalidated. The destination copy of iPage needs
+** to be updated with the new data before the backup operation is
+** complete.
+**
+** It is assumed that the mutex associated with the BtShared object
+** corresponding to the source database is held when this function is
+** called.
+*/
+static SQLITE_NOINLINE void backupUpdate(
+ sqlite3_backup *p,
+ Pgno iPage,
+ const u8 *aData
+){
+ assert( p!=0 );
+ do{
+ assert( sqlite3_mutex_held(p->pSrc->pBt->mutex) );
+ if( !isFatalError(p->rc) && iPage<p->iNext ){
+ /* The backup process p has already copied page iPage. But now it
+ ** has been modified by a transaction on the source pager. Copy
+ ** the new data into the backup.
+ */
+ int rc;
+ assert( p->pDestDb );
+ sqlite3_mutex_enter(p->pDestDb->mutex);
+ rc = backupOnePage(p, iPage, aData, 1);
+ sqlite3_mutex_leave(p->pDestDb->mutex);
+ assert( rc!=SQLITE_BUSY && rc!=SQLITE_LOCKED );
+ if( rc!=SQLITE_OK ){
+ p->rc = rc;
+ }
+ }
+ }while( (p = p->pNext)!=0 );
+}
+SQLITE_PRIVATE void sqlite3BackupUpdate(sqlite3_backup *pBackup, Pgno iPage, const u8 *aData){
+ if( pBackup ) backupUpdate(pBackup, iPage, aData);
+}
+
+/*
+** Restart the backup process. This is called when the pager layer
+** detects that the database has been modified by an external database
+** connection. In this case there is no way of knowing which of the
+** pages that have been copied into the destination database are still
+** valid and which are not, so the entire process needs to be restarted.
+**
+** It is assumed that the mutex associated with the BtShared object
+** corresponding to the source database is held when this function is
+** called.
+*/
+SQLITE_PRIVATE void sqlite3BackupRestart(sqlite3_backup *pBackup){
+ sqlite3_backup *p; /* Iterator variable */
+ for(p=pBackup; p; p=p->pNext){
+ assert( sqlite3_mutex_held(p->pSrc->pBt->mutex) );
+ p->iNext = 1;
+ }
+}
+
+#ifndef SQLITE_OMIT_VACUUM
+/*
+** Copy the complete content of pFrom into pTo. A transaction
+** must be active for both files.
+**
+** The size of file pTo may be reduced by this operation. If anything
+** goes wrong, the transaction on pTo is rolled back. If successful, the
+** transaction is committed before returning.
+*/
+SQLITE_PRIVATE int sqlite3BtreeCopyFile(Btree *pTo, Btree *pFrom){
+ int rc;
+ sqlite3_file *pFd; /* File descriptor for database pTo */
+ sqlite3_backup b;
+ sqlite3BtreeEnter(pTo);
+ sqlite3BtreeEnter(pFrom);
+
+ assert( sqlite3BtreeIsInTrans(pTo) );
+ pFd = sqlite3PagerFile(sqlite3BtreePager(pTo));
+ if( pFd->pMethods ){
+ i64 nByte = sqlite3BtreeGetPageSize(pFrom)*(i64)sqlite3BtreeLastPage(pFrom);
+ rc = sqlite3OsFileControl(pFd, SQLITE_FCNTL_OVERWRITE, &nByte);
+ if( rc==SQLITE_NOTFOUND ) rc = SQLITE_OK;
+ if( rc ) goto copy_finished;
+ }
+
+ /* Set up an sqlite3_backup object. sqlite3_backup.pDestDb must be set
+ ** to 0. This is used by the implementations of sqlite3_backup_step()
+ ** and sqlite3_backup_finish() to detect that they are being called
+ ** from this function, not directly by the user.
+ */
+ memset(&b, 0, sizeof(b));
+ b.pSrcDb = pFrom->db;
+ b.pSrc = pFrom;
+ b.pDest = pTo;
+ b.iNext = 1;
+
+#ifdef SQLITE_HAS_CODEC
+ sqlite3PagerAlignReserve(sqlite3BtreePager(pTo), sqlite3BtreePager(pFrom));
+#endif
+
+ /* 0x7FFFFFFF is the hard limit for the number of pages in a database
+ ** file. By passing this as the number of pages to copy to
+ ** sqlite3_backup_step(), we can guarantee that the copy finishes
+ ** within a single call (unless an error occurs). The assert() statement
+ ** checks this assumption - (p->rc) should be set to either SQLITE_DONE
+ ** or an error code. */
+ sqlite3_backup_step(&b, 0x7FFFFFFF);
+ assert( b.rc!=SQLITE_OK );
+
+ rc = sqlite3_backup_finish(&b);
+ if( rc==SQLITE_OK ){
+ pTo->pBt->btsFlags &= ~BTS_PAGESIZE_FIXED;
+ }else{
+ sqlite3PagerClearCache(sqlite3BtreePager(b.pDest));
+ }
+
+ assert( sqlite3BtreeIsInTrans(pTo)==0 );
+copy_finished:
+ sqlite3BtreeLeave(pFrom);
+ sqlite3BtreeLeave(pTo);
+ return rc;
+}
+#endif /* SQLITE_OMIT_VACUUM */
+
+/************** End of backup.c **********************************************/
+/************** Begin file vdbemem.c *****************************************/
+/*
+** 2004 May 26
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+** This file contains code used to manipulate the "Mem" structure. A "Mem"
+** stores a single value in the VDBE. Mem is an opaque structure visible
+** only within the VDBE. Interface routines refer to a Mem using the
+** name sqlite_value.
+*/
+/* #include "sqliteInt.h" */
+/* #include "vdbeInt.h" */
+
+#ifdef SQLITE_DEBUG
+/*
+** Check invariants on a Mem object.
+**
+** This routine is intended for use inside of assert() statements, like
+** this: assert( sqlite3VdbeCheckMemInvariants(pMem) );
+*/
+SQLITE_PRIVATE int sqlite3VdbeCheckMemInvariants(Mem *p){
+ /* If MEM_Dyn is set then Mem.xDel!=0.
+  ** Mem.xDel might not be initialized if MEM_Dyn is clear.
+ */
+ assert( (p->flags & MEM_Dyn)==0 || p->xDel!=0 );
+
+ /* MEM_Dyn may only be set if Mem.szMalloc==0. In this way we
+ ** ensure that if Mem.szMalloc>0 then it is safe to do
+ ** Mem.z = Mem.zMalloc without having to check Mem.flags&MEM_Dyn.
+ ** That saves a few cycles in inner loops. */
+ assert( (p->flags & MEM_Dyn)==0 || p->szMalloc==0 );
+
+ /* Cannot be both MEM_Int and MEM_Real at the same time */
+ assert( (p->flags & (MEM_Int|MEM_Real))!=(MEM_Int|MEM_Real) );
+
+ /* The szMalloc field holds the correct memory allocation size */
+ assert( p->szMalloc==0
+ || p->szMalloc==sqlite3DbMallocSize(p->db,p->zMalloc) );
+
+  /* If p holds a string or blob, then Mem.z must point to exactly
+ ** one of the following:
+ **
+ ** (1) Memory in Mem.zMalloc and managed by the Mem object
+ ** (2) Memory to be freed using Mem.xDel
+ ** (3) An ephemeral string or blob
+ ** (4) A static string or blob
+ */
+ if( (p->flags & (MEM_Str|MEM_Blob)) && p->n>0 ){
+ assert(
+ ((p->szMalloc>0 && p->z==p->zMalloc)? 1 : 0) +
+ ((p->flags&MEM_Dyn)!=0 ? 1 : 0) +
+ ((p->flags&MEM_Ephem)!=0 ? 1 : 0) +
+ ((p->flags&MEM_Static)!=0 ? 1 : 0) == 1
+ );
+ }
+ return 1;
+}
+#endif
+
+
+/*
+** If pMem is an object with a valid string representation, this routine
+** ensures the internal encoding for the string representation is
+** 'desiredEnc', one of SQLITE_UTF8, SQLITE_UTF16LE or SQLITE_UTF16BE.
+**
+** If pMem is not a string object, or the encoding of the string
+** representation is already stored using the requested encoding, then this
+** routine is a no-op.
+**
+** SQLITE_OK is returned if the conversion is successful (or not required).
+** SQLITE_NOMEM may be returned if a malloc() fails during conversion
+** between formats.
+*/
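+/*
+** For instance, converting a Mem holding the 3-byte UTF-8 string "abc"
+** to SQLITE_UTF16LE leaves it holding the 6-byte sequence
+** 61 00 62 00 63 00 (a simple ASCII-only illustration).
+*/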
+SQLITE_PRIVATE int sqlite3VdbeChangeEncoding(Mem *pMem, int desiredEnc){
+#ifndef SQLITE_OMIT_UTF16
+ int rc;
+#endif
+ assert( (pMem->flags&MEM_RowSet)==0 );
+ assert( desiredEnc==SQLITE_UTF8 || desiredEnc==SQLITE_UTF16LE
+ || desiredEnc==SQLITE_UTF16BE );
+ if( !(pMem->flags&MEM_Str) || pMem->enc==desiredEnc ){
+ return SQLITE_OK;
+ }
+ assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+#ifdef SQLITE_OMIT_UTF16
+ return SQLITE_ERROR;
+#else
+
+ /* MemTranslate() may return SQLITE_OK or SQLITE_NOMEM. If NOMEM is returned,
+ ** then the encoding of the value may not have changed.
+ */
+ rc = sqlite3VdbeMemTranslate(pMem, (u8)desiredEnc);
+ assert(rc==SQLITE_OK || rc==SQLITE_NOMEM);
+ assert(rc==SQLITE_OK || pMem->enc!=desiredEnc);
+ assert(rc==SQLITE_NOMEM || pMem->enc==desiredEnc);
+ return rc;
+#endif
+}
+
+/*
+** Make sure pMem->z points to a writable allocation of at least
+** n bytes (new allocations are rounded up to a minimum of 32 bytes).
+**
+** If the bPreserve argument is true, then the content of pMem->z is
+** copied into the new allocation. pMem must be either a string or
+** blob if bPreserve is true. If bPreserve is false, any prior content
+** in pMem->z is discarded.
+*/
+SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3VdbeMemGrow(Mem *pMem, int n, int bPreserve){
+ assert( sqlite3VdbeCheckMemInvariants(pMem) );
+ assert( (pMem->flags&MEM_RowSet)==0 );
+ testcase( pMem->db==0 );
+
+ /* If the bPreserve flag is set to true, then the memory cell must already
+ ** contain a valid string or blob value. */
+ assert( bPreserve==0 || pMem->flags&(MEM_Blob|MEM_Str) );
+ testcase( bPreserve && pMem->z==0 );
+
+ assert( pMem->szMalloc==0
+ || pMem->szMalloc==sqlite3DbMallocSize(pMem->db, pMem->zMalloc) );
+ if( pMem->szMalloc<n ){
+ if( n<32 ) n = 32;
+ if( bPreserve && pMem->szMalloc>0 && pMem->z==pMem->zMalloc ){
+ pMem->z = pMem->zMalloc = sqlite3DbReallocOrFree(pMem->db, pMem->z, n);
+ bPreserve = 0;
+ }else{
+ if( pMem->szMalloc>0 ) sqlite3DbFree(pMem->db, pMem->zMalloc);
+ pMem->zMalloc = sqlite3DbMallocRaw(pMem->db, n);
+ }
+ if( pMem->zMalloc==0 ){
+ sqlite3VdbeMemSetNull(pMem);
+ pMem->z = 0;
+ pMem->szMalloc = 0;
+ return SQLITE_NOMEM_BKPT;
+ }else{
+ pMem->szMalloc = sqlite3DbMallocSize(pMem->db, pMem->zMalloc);
+ }
+ }
+
+ if( bPreserve && pMem->z && pMem->z!=pMem->zMalloc ){
+ memcpy(pMem->zMalloc, pMem->z, pMem->n);
+ }
+ if( (pMem->flags&MEM_Dyn)!=0 ){
+ assert( pMem->xDel!=0 && pMem->xDel!=SQLITE_DYNAMIC );
+ pMem->xDel((void *)(pMem->z));
+ }
+
+ pMem->z = pMem->zMalloc;
+ pMem->flags &= ~(MEM_Dyn|MEM_Ephem|MEM_Static);
+ return SQLITE_OK;
+}
+
+/*
+** Change the pMem->zMalloc allocation to be at least szNew bytes.
+** If pMem->zMalloc already meets or exceeds the requested size, this
+** routine is a no-op.
+**
+** Any prior string or blob content in the pMem object may be discarded.
+** The pMem->xDel destructor is called, if it exists. Though MEM_Str
+** and MEM_Blob values may be discarded, MEM_Int, MEM_Real, and MEM_Null
+** values are preserved.
+**
+** Return SQLITE_OK on success or an error code (probably SQLITE_NOMEM)
+** if unable to complete the resizing.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMemClearAndResize(Mem *pMem, int szNew){
+ assert( szNew>0 );
+ assert( (pMem->flags & MEM_Dyn)==0 || pMem->szMalloc==0 );
+ if( pMem->szMalloc<szNew ){
+ return sqlite3VdbeMemGrow(pMem, szNew, 0);
+ }
+ assert( (pMem->flags & MEM_Dyn)==0 );
+ pMem->z = pMem->zMalloc;
+ pMem->flags &= (MEM_Null|MEM_Int|MEM_Real);
+ return SQLITE_OK;
+}
+
+/*
+** Change pMem so that its MEM_Str or MEM_Blob value is stored in
+** Mem.zMalloc, where it can be safely written.
+**
+** Return SQLITE_OK on success or SQLITE_NOMEM if malloc fails.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMemMakeWriteable(Mem *pMem){
+ assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+ assert( (pMem->flags&MEM_RowSet)==0 );
+ if( (pMem->flags & (MEM_Str|MEM_Blob))!=0 ){
+ if( ExpandBlob(pMem) ) return SQLITE_NOMEM;
+ if( pMem->szMalloc==0 || pMem->z!=pMem->zMalloc ){
+ if( sqlite3VdbeMemGrow(pMem, pMem->n + 2, 1) ){
+ return SQLITE_NOMEM_BKPT;
+ }
+ pMem->z[pMem->n] = 0;
+ pMem->z[pMem->n+1] = 0;
+ pMem->flags |= MEM_Term;
+ }
+ }
+ pMem->flags &= ~MEM_Ephem;
+#ifdef SQLITE_DEBUG
+ pMem->pScopyFrom = 0;
+#endif
+
+ return SQLITE_OK;
+}
+
+/*
+** If the given Mem* has a zero-filled tail, turn it into an ordinary
+** blob stored in dynamically allocated space.
+*/
+#ifndef SQLITE_OMIT_INCRBLOB
+SQLITE_PRIVATE int sqlite3VdbeMemExpandBlob(Mem *pMem){
+ int nByte;
+ assert( pMem->flags & MEM_Zero );
+ assert( pMem->flags&MEM_Blob );
+ assert( (pMem->flags&MEM_RowSet)==0 );
+ assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+
+ /* Set nByte to the number of bytes required to store the expanded blob. */
+ nByte = pMem->n + pMem->u.nZero;
+ if( nByte<=0 ){
+ nByte = 1;
+ }
+ if( sqlite3VdbeMemGrow(pMem, nByte, 1) ){
+ return SQLITE_NOMEM_BKPT;
+ }
+
+ memset(&pMem->z[pMem->n], 0, pMem->u.nZero);
+ pMem->n += pMem->u.nZero;
+ pMem->flags &= ~(MEM_Zero|MEM_Term);
+ return SQLITE_OK;
+}
+#endif
+
+/*
+** It is already known that pMem contains an unterminated string.
+** Add the zero terminator.
+*/
+static SQLITE_NOINLINE int vdbeMemAddTerminator(Mem *pMem){
+ if( sqlite3VdbeMemGrow(pMem, pMem->n+2, 1) ){
+ return SQLITE_NOMEM_BKPT;
+ }
+ pMem->z[pMem->n] = 0;
+ pMem->z[pMem->n+1] = 0;
+ pMem->flags |= MEM_Term;
+ return SQLITE_OK;
+}
+
+/*
+** Make sure the given Mem is \u0000 terminated.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMemNulTerminate(Mem *pMem){
+ assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+ testcase( (pMem->flags & (MEM_Term|MEM_Str))==(MEM_Term|MEM_Str) );
+ testcase( (pMem->flags & (MEM_Term|MEM_Str))==0 );
+ if( (pMem->flags & (MEM_Term|MEM_Str))!=MEM_Str ){
+ return SQLITE_OK; /* Nothing to do */
+ }else{
+ return vdbeMemAddTerminator(pMem);
+ }
+}
+
+/*
+** Add MEM_Str to the set of representations for the given Mem. Numbers
+** are converted using sqlite3_snprintf(). Converting a BLOB to a string
+** is a no-op.
+**
+** Existing representations MEM_Int and MEM_Real are invalidated if
+** bForce is true but are retained if bForce is false.
+**
+** A MEM_Null value will never be passed to this function. This function is
+** used for converting values to text for returning to the user (i.e. via
+** sqlite3_value_text()), or for ensuring that values to be used as btree
+** keys are strings. In the former case a NULL pointer is returned to the
+** user and the latter case is an internal programming error.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMemStringify(Mem *pMem, u8 enc, u8 bForce){
+ int fg = pMem->flags;
+ const int nByte = 32;
+
+ assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+ assert( !(fg&MEM_Zero) );
+ assert( !(fg&(MEM_Str|MEM_Blob)) );
+ assert( fg&(MEM_Int|MEM_Real) );
+ assert( (pMem->flags&MEM_RowSet)==0 );
+ assert( EIGHT_BYTE_ALIGNMENT(pMem) );
+
+
+ if( sqlite3VdbeMemClearAndResize(pMem, nByte) ){
+ pMem->enc = 0;
+ return SQLITE_NOMEM_BKPT;
+ }
+
+ /* For a Real or Integer, use sqlite3_snprintf() to produce the UTF-8
+ ** string representation of the value. Then, if the required encoding
+ ** is UTF-16le or UTF-16be do a translation.
+ **
+ ** FIX ME: It would be better if sqlite3_snprintf() could do UTF-16.
+ */
+ if( fg & MEM_Int ){
+ sqlite3_snprintf(nByte, pMem->z, "%lld", pMem->u.i);
+ }else{
+ assert( fg & MEM_Real );
+ sqlite3_snprintf(nByte, pMem->z, "%!.15g", pMem->u.r);
+ }
+ pMem->n = sqlite3Strlen30(pMem->z);
+ pMem->enc = SQLITE_UTF8;
+ pMem->flags |= MEM_Str|MEM_Term;
+ if( bForce ) pMem->flags &= ~(MEM_Int|MEM_Real);
+ sqlite3VdbeChangeEncoding(pMem, enc);
+ return SQLITE_OK;
+}
+
+/*
+** Memory cell pMem contains the context of an aggregate function.
+** This routine calls the finalize method for that function. The
+** result of the aggregate is stored back into pMem.
+**
+** Return SQLITE_ERROR if the finalizer reports an error. SQLITE_OK
+** otherwise.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMemFinalize(Mem *pMem, FuncDef *pFunc){
+ int rc = SQLITE_OK;
+ if( ALWAYS(pFunc && pFunc->xFinalize) ){
+ sqlite3_context ctx;
+ Mem t;
+ assert( (pMem->flags & MEM_Null)!=0 || pFunc==pMem->u.pDef );
+ assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+ memset(&ctx, 0, sizeof(ctx));
+ memset(&t, 0, sizeof(t));
+ t.flags = MEM_Null;
+ t.db = pMem->db;
+ ctx.pOut = &t;
+ ctx.pMem = pMem;
+ ctx.pFunc = pFunc;
+ pFunc->xFinalize(&ctx); /* IMP: R-24505-23230 */
+ assert( (pMem->flags & MEM_Dyn)==0 );
+ if( pMem->szMalloc>0 ) sqlite3DbFree(pMem->db, pMem->zMalloc);
+ memcpy(pMem, &t, sizeof(t));
+ rc = ctx.isError;
+ }
+ return rc;
+}
+
+/*
+** If the memory cell contains a value that must be freed by
+** invoking the external callback in Mem.xDel, then this routine
+** will free that value. It also sets Mem.flags to MEM_Null.
+**
+** This is a helper routine for sqlite3VdbeMemSetNull() and
+** for sqlite3VdbeMemRelease(). Use those other routines as the
+** entry point for releasing Mem resources.
+*/
+static SQLITE_NOINLINE void vdbeMemClearExternAndSetNull(Mem *p){
+ assert( p->db==0 || sqlite3_mutex_held(p->db->mutex) );
+ assert( VdbeMemDynamic(p) );
+ if( p->flags&MEM_Agg ){
+ sqlite3VdbeMemFinalize(p, p->u.pDef);
+ assert( (p->flags & MEM_Agg)==0 );
+ testcase( p->flags & MEM_Dyn );
+ }
+ if( p->flags&MEM_Dyn ){
+ assert( (p->flags&MEM_RowSet)==0 );
+ assert( p->xDel!=SQLITE_DYNAMIC && p->xDel!=0 );
+ p->xDel((void *)p->z);
+ }else if( p->flags&MEM_RowSet ){
+ sqlite3RowSetClear(p->u.pRowSet);
+ }else if( p->flags&MEM_Frame ){
+ VdbeFrame *pFrame = p->u.pFrame;
+ pFrame->pParent = pFrame->v->pDelFrame;
+ pFrame->v->pDelFrame = pFrame;
+ }
+ p->flags = MEM_Null;
+}
+
+/*
+** Release memory held by the Mem p, both external memory cleared
+** by p->xDel and memory in p->zMalloc.
+**
+** This is a helper routine invoked by sqlite3VdbeMemRelease() in
+** the unusual case where there really is memory in p that needs
+** to be freed.
+*/
+static SQLITE_NOINLINE void vdbeMemClear(Mem *p){
+ if( VdbeMemDynamic(p) ){
+ vdbeMemClearExternAndSetNull(p);
+ }
+ if( p->szMalloc ){
+ sqlite3DbFree(p->db, p->zMalloc);
+ p->szMalloc = 0;
+ }
+ p->z = 0;
+}
+
+/*
+** Release any memory resources held by the Mem. Both the memory that is
+** freed by Mem.xDel and the Mem.zMalloc allocation are freed.
+**
+** Use this routine to clean up prior to abandoning a Mem, or to
+** reset a Mem back to its minimum memory utilization.
+**
+** Use sqlite3VdbeMemSetNull() to release just the Mem.xDel space
+** prior to inserting new content into the Mem.
+*/
+SQLITE_PRIVATE void sqlite3VdbeMemRelease(Mem *p){
+ assert( sqlite3VdbeCheckMemInvariants(p) );
+ if( VdbeMemDynamic(p) || p->szMalloc ){
+ vdbeMemClear(p);
+ }
+}
+
+/*
+** Convert a 64-bit IEEE double into a 64-bit signed integer.
+** If the double is out of range of a 64-bit signed integer then
+** return the closest available 64-bit signed integer.
+*/
+static i64 doubleToInt64(double r){
+#ifdef SQLITE_OMIT_FLOATING_POINT
+ /* When floating-point is omitted, double and int64 are the same thing */
+ return r;
+#else
+ /*
+ ** Many compilers we encounter do not define constants for the
+ ** minimum and maximum 64-bit integers, or they define them
+ ** inconsistently. And many do not understand the "LL" notation.
+ ** So we define our own static constants here using nothing
+ ** larger than a 32-bit integer constant.
+ */
+ static const i64 maxInt = LARGEST_INT64;
+ static const i64 minInt = SMALLEST_INT64;
+
+ if( r<=(double)minInt ){
+ return minInt;
+ }else if( r>=(double)maxInt ){
+ return maxInt;
+ }else{
+ return (i64)r;
+ }
+#endif
+}
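+
+/*
+** For example, under this scheme doubleToInt64(2.7) and doubleToInt64(-2.7)
+** truncate toward zero to 2 and -2 respectively, while a value such as
+** 1e19, which lies outside the 64-bit signed range, is clamped to
+** LARGEST_INT64 (and -1e19 to SMALLEST_INT64).
+*/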
+
+/*
+** Return some kind of integer value which is the best we can do
+** at representing the value that *pMem describes as an integer.
+** If pMem is an integer, then the value is exact. If pMem is
+** a floating-point then the value returned is the integer part.
+** If pMem is a string or blob, then we make an attempt to convert
+** it into an integer and return that. If pMem represents an
+** SQL-NULL value, return 0.
+**
+** If pMem represents a string value, its encoding might be changed.
+*/
+SQLITE_PRIVATE i64 sqlite3VdbeIntValue(Mem *pMem){
+ int flags;
+ assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+ assert( EIGHT_BYTE_ALIGNMENT(pMem) );
+ flags = pMem->flags;
+ if( flags & MEM_Int ){
+ return pMem->u.i;
+ }else if( flags & MEM_Real ){
+ return doubleToInt64(pMem->u.r);
+ }else if( flags & (MEM_Str|MEM_Blob) ){
+ i64 value = 0;
+ assert( pMem->z || pMem->n==0 );
+ sqlite3Atoi64(pMem->z, &value, pMem->n, pMem->enc);
+ return value;
+ }else{
+ return 0;
+ }
+}
+
+/*
+** Return the best representation of pMem that we can get into a
+** double. If pMem is already a double or an integer, return its
+** value. If it is a string or blob, try to convert it to a double.
+** If it is a NULL, return 0.0.
+*/
+SQLITE_PRIVATE double sqlite3VdbeRealValue(Mem *pMem){
+ assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+ assert( EIGHT_BYTE_ALIGNMENT(pMem) );
+ if( pMem->flags & MEM_Real ){
+ return pMem->u.r;
+ }else if( pMem->flags & MEM_Int ){
+ return (double)pMem->u.i;
+ }else if( pMem->flags & (MEM_Str|MEM_Blob) ){
+ /* (double)0 In case of SQLITE_OMIT_FLOATING_POINT... */
+ double val = (double)0;
+ sqlite3AtoF(pMem->z, &val, pMem->n, pMem->enc);
+ return val;
+ }else{
+ /* (double)0 In case of SQLITE_OMIT_FLOATING_POINT... */
+ return (double)0;
+ }
+}
+
+/*
+** The MEM structure is already a MEM_Real. Try to also make it a
+** MEM_Int if we can.
+*/
+SQLITE_PRIVATE void sqlite3VdbeIntegerAffinity(Mem *pMem){
+ i64 ix;
+ assert( pMem->flags & MEM_Real );
+ assert( (pMem->flags & MEM_RowSet)==0 );
+ assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+ assert( EIGHT_BYTE_ALIGNMENT(pMem) );
+
+ ix = doubleToInt64(pMem->u.r);
+
+ /* Only mark the value as an integer if
+ **
+ ** (1) the round-trip conversion real->int->real is a no-op, and
+ ** (2) The integer is neither the largest nor the smallest
+ ** possible integer (ticket #3922)
+ **
+  ** The second and third terms in the following conditional enforce
+ ** the second condition under the assumption that addition overflow causes
+ ** values to wrap around.
+ */
+ if( pMem->u.r==ix && ix>SMALLEST_INT64 && ix<LARGEST_INT64 ){
+ pMem->u.i = ix;
+ MemSetTypeFlag(pMem, MEM_Int);
+ }
+}
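+
+/*
+** As a quick illustration of the rule above: a MEM_Real holding 3.0 also
+** becomes MEM_Int 3, while 3.5 (the round-trip is not a no-op) and 9.3e18
+** (outside the representable 64-bit range) remain MEM_Real only.
+*/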
+
+/*
+** Convert pMem to type integer. Invalidate any prior representations.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMemIntegerify(Mem *pMem){
+ assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+ assert( (pMem->flags & MEM_RowSet)==0 );
+ assert( EIGHT_BYTE_ALIGNMENT(pMem) );
+
+ pMem->u.i = sqlite3VdbeIntValue(pMem);
+ MemSetTypeFlag(pMem, MEM_Int);
+ return SQLITE_OK;
+}
+
+/*
+** Convert pMem so that it is of type MEM_Real.
+** Invalidate any prior representations.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMemRealify(Mem *pMem){
+ assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+ assert( EIGHT_BYTE_ALIGNMENT(pMem) );
+
+ pMem->u.r = sqlite3VdbeRealValue(pMem);
+ MemSetTypeFlag(pMem, MEM_Real);
+ return SQLITE_OK;
+}
+
+/*
+** Convert pMem so that it has types MEM_Real or MEM_Int or both.
+** Invalidate any prior representations.
+**
+** Every effort is made to force the conversion, even if the input
+** is a string that does not look completely like a number. Convert
+** as much of the string as we can and ignore the rest.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMemNumerify(Mem *pMem){
+ if( (pMem->flags & (MEM_Int|MEM_Real|MEM_Null))==0 ){
+ assert( (pMem->flags & (MEM_Blob|MEM_Str))!=0 );
+ assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+ if( 0==sqlite3Atoi64(pMem->z, &pMem->u.i, pMem->n, pMem->enc) ){
+ MemSetTypeFlag(pMem, MEM_Int);
+ }else{
+ pMem->u.r = sqlite3VdbeRealValue(pMem);
+ MemSetTypeFlag(pMem, MEM_Real);
+ sqlite3VdbeIntegerAffinity(pMem);
+ }
+ }
+ assert( (pMem->flags & (MEM_Int|MEM_Real|MEM_Null))!=0 );
+ pMem->flags &= ~(MEM_Str|MEM_Blob|MEM_Zero);
+ return SQLITE_OK;
+}
+
+/*
+** Cast the datatype of the value in pMem according to the affinity
+** "aff". Casting is different from applying affinity in that a cast
+** is forced. In other words, the value is converted into the desired
+** affinity even if that results in loss of data. This routine is
+** used (for example) to implement the SQL "cast()" operator.
+*/
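+/*
+** At the SQL level this forced conversion is what produces results such
+** as the following (a rough illustration, not a full specification):
+**
+**   CAST('123abc' AS INTEGER)   -> 123
+**   CAST(3.9 AS INTEGER)        -> 3
+**   CAST('xyz' AS NUMERIC)      -> 0
+*/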
+SQLITE_PRIVATE void sqlite3VdbeMemCast(Mem *pMem, u8 aff, u8 encoding){
+ if( pMem->flags & MEM_Null ) return;
+ switch( aff ){
+ case SQLITE_AFF_BLOB: { /* Really a cast to BLOB */
+ if( (pMem->flags & MEM_Blob)==0 ){
+ sqlite3ValueApplyAffinity(pMem, SQLITE_AFF_TEXT, encoding);
+ assert( pMem->flags & MEM_Str || pMem->db->mallocFailed );
+ if( pMem->flags & MEM_Str ) MemSetTypeFlag(pMem, MEM_Blob);
+ }else{
+ pMem->flags &= ~(MEM_TypeMask&~MEM_Blob);
+ }
+ break;
+ }
+ case SQLITE_AFF_NUMERIC: {
+ sqlite3VdbeMemNumerify(pMem);
+ break;
+ }
+ case SQLITE_AFF_INTEGER: {
+ sqlite3VdbeMemIntegerify(pMem);
+ break;
+ }
+ case SQLITE_AFF_REAL: {
+ sqlite3VdbeMemRealify(pMem);
+ break;
+ }
+ default: {
+ assert( aff==SQLITE_AFF_TEXT );
+ assert( MEM_Str==(MEM_Blob>>3) );
+ pMem->flags |= (pMem->flags&MEM_Blob)>>3;
+ sqlite3ValueApplyAffinity(pMem, SQLITE_AFF_TEXT, encoding);
+ assert( pMem->flags & MEM_Str || pMem->db->mallocFailed );
+ pMem->flags &= ~(MEM_Int|MEM_Real|MEM_Blob|MEM_Zero);
+ break;
+ }
+ }
+}
+
+/*
+** Initialize bulk memory to be a consistent Mem object.
+**
+** The minimum amount of initialization feasible is performed.
+*/
+SQLITE_PRIVATE void sqlite3VdbeMemInit(Mem *pMem, sqlite3 *db, u16 flags){
+ assert( (flags & ~MEM_TypeMask)==0 );
+ pMem->flags = flags;
+ pMem->db = db;
+ pMem->szMalloc = 0;
+}
+
+
+/*
+** Delete any previous value and set the value stored in *pMem to NULL.
+**
+** This routine calls the Mem.xDel destructor to dispose of values that
+** require the destructor. But it preserves the Mem.zMalloc memory allocation.
+** To free all resources, use sqlite3VdbeMemRelease(), which both calls this
+** routine to invoke the destructor and deallocates Mem.zMalloc.
+**
+** Use this routine to reset the Mem prior to inserting a new value.
+**
+** Use sqlite3VdbeMemRelease() to completely erase the Mem prior to abandoning it.
+*/
+SQLITE_PRIVATE void sqlite3VdbeMemSetNull(Mem *pMem){
+ if( VdbeMemDynamic(pMem) ){
+ vdbeMemClearExternAndSetNull(pMem);
+ }else{
+ pMem->flags = MEM_Null;
+ }
+}
+SQLITE_PRIVATE void sqlite3ValueSetNull(sqlite3_value *p){
+ sqlite3VdbeMemSetNull((Mem*)p);
+}
+
+/*
+** Delete any previous value and set the value to be a BLOB of length
+** n containing all zeros.
+*/
+SQLITE_PRIVATE void sqlite3VdbeMemSetZeroBlob(Mem *pMem, int n){
+ sqlite3VdbeMemRelease(pMem);
+ pMem->flags = MEM_Blob|MEM_Zero;
+ pMem->n = 0;
+ if( n<0 ) n = 0;
+ pMem->u.nZero = n;
+ pMem->enc = SQLITE_UTF8;
+ pMem->z = 0;
+}
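+
+/*
+** Loosely speaking, this is the machinery behind zero-filled blobs at the
+** SQL and C-API levels, e.g. zeroblob(N) or a call such as
+**
+**   sqlite3_bind_zeroblob(pStmt, 1, 1024*1024);
+**
+** which records a 1MiB zero-filled value without materializing the zeros
+** until the blob is actually expanded (pStmt is an illustrative handle).
+*/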
+
+/*
+** The pMem is known to contain content that needs to be destroyed prior
+** to a value change. So invoke the destructor, then set the value to
+** a 64-bit integer.
+*/
+static SQLITE_NOINLINE void vdbeReleaseAndSetInt64(Mem *pMem, i64 val){
+ sqlite3VdbeMemSetNull(pMem);
+ pMem->u.i = val;
+ pMem->flags = MEM_Int;
+}
+
+/*
+** Delete any previous value and set the value stored in *pMem to val,
+** manifest type INTEGER.
+*/
+SQLITE_PRIVATE void sqlite3VdbeMemSetInt64(Mem *pMem, i64 val){
+ if( VdbeMemDynamic(pMem) ){
+ vdbeReleaseAndSetInt64(pMem, val);
+ }else{
+ pMem->u.i = val;
+ pMem->flags = MEM_Int;
+ }
+}
+
+#ifndef SQLITE_OMIT_FLOATING_POINT
+/*
+** Delete any previous value and set the value stored in *pMem to val,
+** manifest type REAL.
+*/
+SQLITE_PRIVATE void sqlite3VdbeMemSetDouble(Mem *pMem, double val){
+ sqlite3VdbeMemSetNull(pMem);
+ if( !sqlite3IsNaN(val) ){
+ pMem->u.r = val;
+ pMem->flags = MEM_Real;
+ }
+}
+#endif
+
+/*
+** Delete any previous value and set the value of pMem to be an
+** empty boolean index.
+*/
+SQLITE_PRIVATE void sqlite3VdbeMemSetRowSet(Mem *pMem){
+ sqlite3 *db = pMem->db;
+ assert( db!=0 );
+ assert( (pMem->flags & MEM_RowSet)==0 );
+ sqlite3VdbeMemRelease(pMem);
+ pMem->zMalloc = sqlite3DbMallocRawNN(db, 64);
+ if( db->mallocFailed ){
+ pMem->flags = MEM_Null;
+ pMem->szMalloc = 0;
+ }else{
+ assert( pMem->zMalloc );
+ pMem->szMalloc = sqlite3DbMallocSize(db, pMem->zMalloc);
+ pMem->u.pRowSet = sqlite3RowSetInit(db, pMem->zMalloc, pMem->szMalloc);
+ assert( pMem->u.pRowSet!=0 );
+ pMem->flags = MEM_RowSet;
+ }
+}
+
+/*
+** Return true if the Mem object contains a TEXT or BLOB that is
+** too large - whose size exceeds SQLITE_MAX_LENGTH.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMemTooBig(Mem *p){
+ assert( p->db!=0 );
+ if( p->flags & (MEM_Str|MEM_Blob) ){
+ int n = p->n;
+ if( p->flags & MEM_Zero ){
+ n += p->u.nZero;
+ }
+ return n>p->db->aLimit[SQLITE_LIMIT_LENGTH];
+ }
+ return 0;
+}
+
+#ifdef SQLITE_DEBUG
+/*
+** This routine prepares a memory cell for modification by breaking
+** its link to a shallow copy and by marking any current shallow
+** copies of this cell as invalid.
+**
+** This is used for testing and debugging only - to make sure shallow
+** copies are not misused.
+*/
+SQLITE_PRIVATE void sqlite3VdbeMemAboutToChange(Vdbe *pVdbe, Mem *pMem){
+ int i;
+ Mem *pX;
+ for(i=0, pX=pVdbe->aMem; i<pVdbe->nMem; i++, pX++){
+ if( pX->pScopyFrom==pMem ){
+ pX->flags |= MEM_Undefined;
+ pX->pScopyFrom = 0;
+ }
+ }
+ pMem->pScopyFrom = 0;
+}
+#endif /* SQLITE_DEBUG */
+
+
+/*
+** Make a shallow copy of pFrom into pTo.  Prior contents of
+** pTo are freed. The pFrom->z field is not duplicated. If
+** pFrom->z is used, then pTo->z points to the same thing as pFrom->z
+** and flags gets srcType (either MEM_Ephem or MEM_Static).
+*/
+static SQLITE_NOINLINE void vdbeClrCopy(Mem *pTo, const Mem *pFrom, int eType){
+ vdbeMemClearExternAndSetNull(pTo);
+ assert( !VdbeMemDynamic(pTo) );
+ sqlite3VdbeMemShallowCopy(pTo, pFrom, eType);
+}
+SQLITE_PRIVATE void sqlite3VdbeMemShallowCopy(Mem *pTo, const Mem *pFrom, int srcType){
+ assert( (pFrom->flags & MEM_RowSet)==0 );
+ assert( pTo->db==pFrom->db );
+ if( VdbeMemDynamic(pTo) ){ vdbeClrCopy(pTo,pFrom,srcType); return; }
+ memcpy(pTo, pFrom, MEMCELLSIZE);
+ if( (pFrom->flags&MEM_Static)==0 ){
+ pTo->flags &= ~(MEM_Dyn|MEM_Static|MEM_Ephem);
+ assert( srcType==MEM_Ephem || srcType==MEM_Static );
+ pTo->flags |= srcType;
+ }
+}
+
+/*
+** Make a full copy of pFrom into pTo. Prior contents of pTo are
+** freed before the copy is made.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMemCopy(Mem *pTo, const Mem *pFrom){
+ int rc = SQLITE_OK;
+
+ assert( (pFrom->flags & MEM_RowSet)==0 );
+ if( VdbeMemDynamic(pTo) ) vdbeMemClearExternAndSetNull(pTo);
+ memcpy(pTo, pFrom, MEMCELLSIZE);
+ pTo->flags &= ~MEM_Dyn;
+ if( pTo->flags&(MEM_Str|MEM_Blob) ){
+ if( 0==(pFrom->flags&MEM_Static) ){
+ pTo->flags |= MEM_Ephem;
+ rc = sqlite3VdbeMemMakeWriteable(pTo);
+ }
+ }
+
+ return rc;
+}
+
+/*
+** Transfer the contents of pFrom to pTo. Any existing value in pTo is
+** freed. If pFrom contains ephemeral data, a copy is made.
+**
+** pFrom contains an SQL NULL when this routine returns.
+*/
+SQLITE_PRIVATE void sqlite3VdbeMemMove(Mem *pTo, Mem *pFrom){
+ assert( pFrom->db==0 || sqlite3_mutex_held(pFrom->db->mutex) );
+ assert( pTo->db==0 || sqlite3_mutex_held(pTo->db->mutex) );
+ assert( pFrom->db==0 || pTo->db==0 || pFrom->db==pTo->db );
+
+ sqlite3VdbeMemRelease(pTo);
+ memcpy(pTo, pFrom, sizeof(Mem));
+ pFrom->flags = MEM_Null;
+ pFrom->szMalloc = 0;
+}
+
+/*
+** Change the value of a Mem to be a string or a BLOB.
+**
+** The memory management strategy depends on the value of the xDel
+** parameter. If the value passed is SQLITE_TRANSIENT, then the
+** string is copied into a (possibly existing) buffer managed by the
+** Mem structure. Otherwise, any existing buffer is freed and the
+** pointer copied.
+**
+** If the string is too large (if it exceeds the SQLITE_LIMIT_LENGTH
+** size limit) then no memory allocation occurs. If the string can be
+** stored without allocating memory, then it is. If a memory allocation
+** is required to store the string, then the value of pMem is unchanged.  In
+** either case, SQLITE_TOOBIG is returned.
+*/
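+/*
+** The xDel convention here mirrors what the public bind and result APIs
+** expose. A loose sketch (pStmt and zName are illustrative):
+**
+**   sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_TRANSIENT);   copied now
+**   sqlite3_bind_text(pStmt, 1, "abc", -1, SQLITE_STATIC);      never copied
+*/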
+SQLITE_PRIVATE int sqlite3VdbeMemSetStr(
+ Mem *pMem, /* Memory cell to set to string value */
+ const char *z, /* String pointer */
+ int n, /* Bytes in string, or negative */
+ u8 enc, /* Encoding of z. 0 for BLOBs */
+ void (*xDel)(void*) /* Destructor function */
+){
+ int nByte = n; /* New value for pMem->n */
+ int iLimit; /* Maximum allowed string or blob size */
+ u16 flags = 0; /* New value for pMem->flags */
+
+ assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+ assert( (pMem->flags & MEM_RowSet)==0 );
+
+ /* If z is a NULL pointer, set pMem to contain an SQL NULL. */
+ if( !z ){
+ sqlite3VdbeMemSetNull(pMem);
+ return SQLITE_OK;
+ }
+
+ if( pMem->db ){
+ iLimit = pMem->db->aLimit[SQLITE_LIMIT_LENGTH];
+ }else{
+ iLimit = SQLITE_MAX_LENGTH;
+ }
+ flags = (enc==0?MEM_Blob:MEM_Str);
+ if( nByte<0 ){
+ assert( enc!=0 );
+ if( enc==SQLITE_UTF8 ){
+ nByte = sqlite3Strlen30(z);
+ if( nByte>iLimit ) nByte = iLimit+1;
+ }else{
+ for(nByte=0; nByte<=iLimit && (z[nByte] | z[nByte+1]); nByte+=2){}
+ }
+ flags |= MEM_Term;
+ }
+
+ /* The following block sets the new values of Mem.z and Mem.xDel. It
+ ** also sets a flag in local variable "flags" to indicate the memory
+ ** management (one of MEM_Dyn or MEM_Static).
+ */
+ if( xDel==SQLITE_TRANSIENT ){
+ int nAlloc = nByte;
+ if( flags&MEM_Term ){
+ nAlloc += (enc==SQLITE_UTF8?1:2);
+ }
+ if( nByte>iLimit ){
+ return SQLITE_TOOBIG;
+ }
+ testcase( nAlloc==0 );
+ testcase( nAlloc==31 );
+ testcase( nAlloc==32 );
+ if( sqlite3VdbeMemClearAndResize(pMem, MAX(nAlloc,32)) ){
+ return SQLITE_NOMEM_BKPT;
+ }
+ memcpy(pMem->z, z, nAlloc);
+ }else if( xDel==SQLITE_DYNAMIC ){
+ sqlite3VdbeMemRelease(pMem);
+ pMem->zMalloc = pMem->z = (char *)z;
+ pMem->szMalloc = sqlite3DbMallocSize(pMem->db, pMem->zMalloc);
+ }else{
+ sqlite3VdbeMemRelease(pMem);
+ pMem->z = (char *)z;
+ pMem->xDel = xDel;
+ flags |= ((xDel==SQLITE_STATIC)?MEM_Static:MEM_Dyn);
+ }
+
+ pMem->n = nByte;
+ pMem->flags = flags;
+ pMem->enc = (enc==0 ? SQLITE_UTF8 : enc);
+
+#ifndef SQLITE_OMIT_UTF16
+ if( pMem->enc!=SQLITE_UTF8 && sqlite3VdbeMemHandleBom(pMem) ){
+ return SQLITE_NOMEM_BKPT;
+ }
+#endif
+
+ if( nByte>iLimit ){
+ return SQLITE_TOOBIG;
+ }
+
+ return SQLITE_OK;
+}
+
+/*
+** Move data out of a btree key or data field and into a Mem structure.
+** The data is payload from the entry that pCur is currently pointing
+** to. offset and amt determine what portion of the data or key to retrieve.
+** The result is written into the pMem element.
+**
+** The pMem object must have been initialized. This routine will use
+** pMem->zMalloc to hold the content from the btree, if possible. New
+** pMem->zMalloc space will be allocated if necessary. The calling routine
+** is responsible for making sure that the pMem object is eventually
+** destroyed.
+**
+** If this routine fails for any reason (malloc returns NULL or unable
+** to read from the disk) then the pMem is left in an inconsistent state.
+*/
+static SQLITE_NOINLINE int vdbeMemFromBtreeResize(
+ BtCursor *pCur, /* Cursor pointing at record to retrieve. */
+ u32 offset, /* Offset from the start of data to return bytes from. */
+ u32 amt, /* Number of bytes to return. */
+ Mem *pMem /* OUT: Return data in this Mem structure. */
+){
+ int rc;
+ pMem->flags = MEM_Null;
+ if( SQLITE_OK==(rc = sqlite3VdbeMemClearAndResize(pMem, amt+2)) ){
+ rc = sqlite3BtreePayload(pCur, offset, amt, pMem->z);
+ if( rc==SQLITE_OK ){
+ pMem->z[amt] = 0;
+ pMem->z[amt+1] = 0;
+ pMem->flags = MEM_Blob|MEM_Term;
+ pMem->n = (int)amt;
+ }else{
+ sqlite3VdbeMemRelease(pMem);
+ }
+ }
+ return rc;
+}
+SQLITE_PRIVATE int sqlite3VdbeMemFromBtree(
+ BtCursor *pCur, /* Cursor pointing at record to retrieve. */
+ u32 offset, /* Offset from the start of data to return bytes from. */
+ u32 amt, /* Number of bytes to return. */
+ Mem *pMem /* OUT: Return data in this Mem structure. */
+){
+ char *zData; /* Data from the btree layer */
+ u32 available = 0; /* Number of bytes available on the local btree page */
+ int rc = SQLITE_OK; /* Return code */
+
+ assert( sqlite3BtreeCursorIsValid(pCur) );
+ assert( !VdbeMemDynamic(pMem) );
+
+ /* Note: the calls to BtreeKeyFetch() and DataFetch() below assert()
+ ** that both the BtShared and database handle mutexes are held. */
+ assert( (pMem->flags & MEM_RowSet)==0 );
+ zData = (char *)sqlite3BtreePayloadFetch(pCur, &available);
+ assert( zData!=0 );
+
+ if( offset+amt<=available ){
+ pMem->z = &zData[offset];
+ pMem->flags = MEM_Blob|MEM_Ephem;
+ pMem->n = (int)amt;
+ }else{
+ rc = vdbeMemFromBtreeResize(pCur, offset, amt, pMem);
+ }
+
+ return rc;
+}
+
+/*
+** The pVal argument is known to be a value other than NULL.
+** Convert it into a string with encoding enc and return a pointer
+** to a zero-terminated version of that string.
+*/
+static SQLITE_NOINLINE const void *valueToText(sqlite3_value* pVal, u8 enc){
+ assert( pVal!=0 );
+ assert( pVal->db==0 || sqlite3_mutex_held(pVal->db->mutex) );
+ assert( (enc&3)==(enc&~SQLITE_UTF16_ALIGNED) );
+ assert( (pVal->flags & MEM_RowSet)==0 );
+ assert( (pVal->flags & (MEM_Null))==0 );
+ if( pVal->flags & (MEM_Blob|MEM_Str) ){
+ if( ExpandBlob(pVal) ) return 0;
+ pVal->flags |= MEM_Str;
+ if( pVal->enc != (enc & ~SQLITE_UTF16_ALIGNED) ){
+ sqlite3VdbeChangeEncoding(pVal, enc & ~SQLITE_UTF16_ALIGNED);
+ }
+ if( (enc & SQLITE_UTF16_ALIGNED)!=0 && 1==(1&SQLITE_PTR_TO_INT(pVal->z)) ){
+ assert( (pVal->flags & (MEM_Ephem|MEM_Static))!=0 );
+ if( sqlite3VdbeMemMakeWriteable(pVal)!=SQLITE_OK ){
+ return 0;
+ }
+ }
+ sqlite3VdbeMemNulTerminate(pVal); /* IMP: R-31275-44060 */
+ }else{
+ sqlite3VdbeMemStringify(pVal, enc, 0);
+ assert( 0==(1&SQLITE_PTR_TO_INT(pVal->z)) );
+ }
+ assert(pVal->enc==(enc & ~SQLITE_UTF16_ALIGNED) || pVal->db==0
+ || pVal->db->mallocFailed );
+ if( pVal->enc==(enc & ~SQLITE_UTF16_ALIGNED) ){
+ return pVal->z;
+ }else{
+ return 0;
+ }
+}
+
+/* This function is only available internally; it is not part of the
+** external API. It works in a similar way to sqlite3_value_text(),
+** except the data returned is in the encoding specified by the second
+** parameter, which must be one of SQLITE_UTF16BE, SQLITE_UTF16LE or
+** SQLITE_UTF8.
+**
+** (2006-02-16:) The enc value can be or-ed with SQLITE_UTF16_ALIGNED.
+** If that is the case, then the result must be aligned on an even byte
+** boundary.
+*/
+SQLITE_PRIVATE const void *sqlite3ValueText(sqlite3_value* pVal, u8 enc){
+ if( !pVal ) return 0;
+ assert( pVal->db==0 || sqlite3_mutex_held(pVal->db->mutex) );
+ assert( (enc&3)==(enc&~SQLITE_UTF16_ALIGNED) );
+ assert( (pVal->flags & MEM_RowSet)==0 );
+ if( (pVal->flags&(MEM_Str|MEM_Term))==(MEM_Str|MEM_Term) && pVal->enc==enc ){
+ return pVal->z;
+ }
+ if( pVal->flags&MEM_Null ){
+ return 0;
+ }
+ return valueToText(pVal, enc);
+}
+
+/*
+** Create a new sqlite3_value object.
+*/
+SQLITE_PRIVATE sqlite3_value *sqlite3ValueNew(sqlite3 *db){
+ Mem *p = sqlite3DbMallocZero(db, sizeof(*p));
+ if( p ){
+ p->flags = MEM_Null;
+ p->db = db;
+ }
+ return p;
+}
+
+/*
+** Context object passed by sqlite3Stat4ProbeSetValue() through to
+** valueNew(). See comments above valueNew() for details.
+*/
+struct ValueNewStat4Ctx {
+ Parse *pParse;
+ Index *pIdx;
+ UnpackedRecord **ppRec;
+ int iVal;
+};
+
+/*
+** Allocate and return a pointer to a new sqlite3_value object. If
+** the second argument to this function is NULL, the object is allocated
+** by calling sqlite3ValueNew().
+**
+** Otherwise, if the second argument is non-zero, then this function is
+** being called indirectly by sqlite3Stat4ProbeSetValue(). If it has not
+** already been allocated, allocate the UnpackedRecord structure that
+** that function will return to its caller here. Then return a pointer to
+** an sqlite3_value within the UnpackedRecord.aMem[] array.
+*/
+static sqlite3_value *valueNew(sqlite3 *db, struct ValueNewStat4Ctx *p){
+#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
+ if( p ){
+ UnpackedRecord *pRec = p->ppRec[0];
+
+ if( pRec==0 ){
+ Index *pIdx = p->pIdx; /* Index being probed */
+ int nByte; /* Bytes of space to allocate */
+ int i; /* Counter variable */
+ int nCol = pIdx->nColumn; /* Number of index columns including rowid */
+
+ nByte = sizeof(Mem) * nCol + ROUND8(sizeof(UnpackedRecord));
+ pRec = (UnpackedRecord*)sqlite3DbMallocZero(db, nByte);
+ if( pRec ){
+ pRec->pKeyInfo = sqlite3KeyInfoOfIndex(p->pParse, pIdx);
+ if( pRec->pKeyInfo ){
+ assert( pRec->pKeyInfo->nField+pRec->pKeyInfo->nXField==nCol );
+ assert( pRec->pKeyInfo->enc==ENC(db) );
+ pRec->aMem = (Mem *)((u8*)pRec + ROUND8(sizeof(UnpackedRecord)));
+ for(i=0; i<nCol; i++){
+ pRec->aMem[i].flags = MEM_Null;
+ pRec->aMem[i].db = db;
+ }
+ }else{
+ sqlite3DbFree(db, pRec);
+ pRec = 0;
+ }
+ }
+ if( pRec==0 ) return 0;
+ p->ppRec[0] = pRec;
+ }
+
+ pRec->nField = p->iVal+1;
+ return &pRec->aMem[p->iVal];
+ }
+#else
+ UNUSED_PARAMETER(p);
+#endif /* defined(SQLITE_ENABLE_STAT3_OR_STAT4) */
+ return sqlite3ValueNew(db);
+}
+
+/*
+** The expression object indicated by the second argument is guaranteed
+** to be a scalar SQL function. If
+**
+** * all function arguments are SQL literals,
+** * one of the SQLITE_FUNC_CONSTANT or _SLOCHNG function flags is set, and
+** * the SQLITE_FUNC_NEEDCOLL function flag is not set,
+**
+** then this routine attempts to invoke the SQL function. Assuming no
+** error occurs, output parameter (*ppVal) is set to point to a value
+** object containing the result before returning SQLITE_OK.
+**
+** Affinity aff is applied to the result of the function before returning.
+** If the result is a text value, the sqlite3_value object uses encoding
+** enc.
+**
+** If the conditions above are not met, this function returns SQLITE_OK
+** and sets (*ppVal) to NULL. Or, if an error occurs, (*ppVal) is set to
+** NULL and an SQLite error code is returned.
+*/
+#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
+static int valueFromFunction(
+ sqlite3 *db, /* The database connection */
+ Expr *p, /* The expression to evaluate */
+ u8 enc, /* Encoding to use */
+ u8 aff, /* Affinity to use */
+ sqlite3_value **ppVal, /* Write the new value here */
+ struct ValueNewStat4Ctx *pCtx /* Second argument for valueNew() */
+){
+ sqlite3_context ctx; /* Context object for function invocation */
+ sqlite3_value **apVal = 0; /* Function arguments */
+ int nVal = 0; /* Size of apVal[] array */
+ FuncDef *pFunc = 0; /* Function definition */
+ sqlite3_value *pVal = 0; /* New value */
+ int rc = SQLITE_OK; /* Return code */
+ ExprList *pList = 0; /* Function arguments */
+ int i; /* Iterator variable */
+
+ assert( pCtx!=0 );
+ assert( (p->flags & EP_TokenOnly)==0 );
+ pList = p->x.pList;
+ if( pList ) nVal = pList->nExpr;
+ pFunc = sqlite3FindFunction(db, p->u.zToken, nVal, enc, 0);
+ assert( pFunc );
+ if( (pFunc->funcFlags & (SQLITE_FUNC_CONSTANT|SQLITE_FUNC_SLOCHNG))==0
+ || (pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL)
+ ){
+ return SQLITE_OK;
+ }
+
+ if( pList ){
+ apVal = (sqlite3_value**)sqlite3DbMallocZero(db, sizeof(apVal[0]) * nVal);
+ if( apVal==0 ){
+ rc = SQLITE_NOMEM_BKPT;
+ goto value_from_function_out;
+ }
+ for(i=0; i<nVal; i++){
+ rc = sqlite3ValueFromExpr(db, pList->a[i].pExpr, enc, aff, &apVal[i]);
+ if( apVal[i]==0 || rc!=SQLITE_OK ) goto value_from_function_out;
+ }
+ }
+
+ pVal = valueNew(db, pCtx);
+ if( pVal==0 ){
+ rc = SQLITE_NOMEM_BKPT;
+ goto value_from_function_out;
+ }
+
+ assert( pCtx->pParse->rc==SQLITE_OK );
+ memset(&ctx, 0, sizeof(ctx));
+ ctx.pOut = pVal;
+ ctx.pFunc = pFunc;
+ pFunc->xSFunc(&ctx, nVal, apVal);
+ if( ctx.isError ){
+ rc = ctx.isError;
+ sqlite3ErrorMsg(pCtx->pParse, "%s", sqlite3_value_text(pVal));
+ }else{
+ sqlite3ValueApplyAffinity(pVal, aff, SQLITE_UTF8);
+ assert( rc==SQLITE_OK );
+ rc = sqlite3VdbeChangeEncoding(pVal, enc);
+ if( rc==SQLITE_OK && sqlite3VdbeMemTooBig(pVal) ){
+ rc = SQLITE_TOOBIG;
+ pCtx->pParse->nErr++;
+ }
+ }
+ pCtx->pParse->rc = rc;
+
+ value_from_function_out:
+ if( rc!=SQLITE_OK ){
+ pVal = 0;
+ }
+ if( apVal ){
+ for(i=0; i<nVal; i++){
+ sqlite3ValueFree(apVal[i]);
+ }
+ sqlite3DbFree(db, apVal);
+ }
+
+ *ppVal = pVal;
+ return rc;
+}
+#else
+# define valueFromFunction(a,b,c,d,e,f) SQLITE_OK
+#endif /* defined(SQLITE_ENABLE_STAT3_OR_STAT4) */
+
+/*
+** Extract a value from the supplied expression in the manner described
+** above sqlite3ValueFromExpr(). Allocate the sqlite3_value object
+** using valueNew().
+**
+** If pCtx is NULL and an error occurs after the sqlite3_value object
+** has been allocated, it is freed before returning. Or, if pCtx is not
+** NULL, it is assumed that the caller will free any allocated object
+** in all cases.
+*/
+static int valueFromExpr(
+ sqlite3 *db, /* The database connection */
+ Expr *pExpr, /* The expression to evaluate */
+ u8 enc, /* Encoding to use */
+ u8 affinity, /* Affinity to use */
+ sqlite3_value **ppVal, /* Write the new value here */
+ struct ValueNewStat4Ctx *pCtx /* Second argument for valueNew() */
+){
+ int op;
+ char *zVal = 0;
+ sqlite3_value *pVal = 0;
+ int negInt = 1;
+ const char *zNeg = "";
+ int rc = SQLITE_OK;
+
+ assert( pExpr!=0 );
+ while( (op = pExpr->op)==TK_UPLUS || op==TK_SPAN ) pExpr = pExpr->pLeft;
+ if( NEVER(op==TK_REGISTER) ) op = pExpr->op2;
+
+ /* Compressed expressions only appear when parsing the DEFAULT clause
+ ** on a table column definition, and hence only when pCtx==0. This
+ ** check ensures that an EP_TokenOnly expression is never passed down
+ ** into valueFromFunction(). */
+ assert( (pExpr->flags & EP_TokenOnly)==0 || pCtx==0 );
+
+ if( op==TK_CAST ){
+ u8 aff = sqlite3AffinityType(pExpr->u.zToken,0);
+ rc = valueFromExpr(db, pExpr->pLeft, enc, aff, ppVal, pCtx);
+ testcase( rc!=SQLITE_OK );
+ if( *ppVal ){
+ sqlite3VdbeMemCast(*ppVal, aff, SQLITE_UTF8);
+ sqlite3ValueApplyAffinity(*ppVal, affinity, SQLITE_UTF8);
+ }
+ return rc;
+ }
+
+ /* Handle negative integers in a single step. This is needed in the
+ ** case when the value is -9223372036854775808.
+ */
+ if( op==TK_UMINUS
+ && (pExpr->pLeft->op==TK_INTEGER || pExpr->pLeft->op==TK_FLOAT) ){
+ pExpr = pExpr->pLeft;
+ op = pExpr->op;
+ negInt = -1;
+ zNeg = "-";
+ }
+
+ if( op==TK_STRING || op==TK_FLOAT || op==TK_INTEGER ){
+ pVal = valueNew(db, pCtx);
+ if( pVal==0 ) goto no_mem;
+ if( ExprHasProperty(pExpr, EP_IntValue) ){
+ sqlite3VdbeMemSetInt64(pVal, (i64)pExpr->u.iValue*negInt);
+ }else{
+ zVal = sqlite3MPrintf(db, "%s%s", zNeg, pExpr->u.zToken);
+ if( zVal==0 ) goto no_mem;
+ sqlite3ValueSetStr(pVal, -1, zVal, SQLITE_UTF8, SQLITE_DYNAMIC);
+ }
+ if( (op==TK_INTEGER || op==TK_FLOAT ) && affinity==SQLITE_AFF_BLOB ){
+ sqlite3ValueApplyAffinity(pVal, SQLITE_AFF_NUMERIC, SQLITE_UTF8);
+ }else{
+ sqlite3ValueApplyAffinity(pVal, affinity, SQLITE_UTF8);
+ }
+ if( pVal->flags & (MEM_Int|MEM_Real) ) pVal->flags &= ~MEM_Str;
+ if( enc!=SQLITE_UTF8 ){
+ rc = sqlite3VdbeChangeEncoding(pVal, enc);
+ }
+ }else if( op==TK_UMINUS ) {
+ /* This branch happens for multiple negative signs. Ex: -(-5) */
+ if( SQLITE_OK==sqlite3ValueFromExpr(db,pExpr->pLeft,enc,affinity,&pVal)
+ && pVal!=0
+ ){
+ sqlite3VdbeMemNumerify(pVal);
+ if( pVal->flags & MEM_Real ){
+ pVal->u.r = -pVal->u.r;
+ }else if( pVal->u.i==SMALLEST_INT64 ){
+ pVal->u.r = -(double)SMALLEST_INT64;
+ MemSetTypeFlag(pVal, MEM_Real);
+ }else{
+ pVal->u.i = -pVal->u.i;
+ }
+ sqlite3ValueApplyAffinity(pVal, affinity, enc);
+ }
+ }else if( op==TK_NULL ){
+ pVal = valueNew(db, pCtx);
+ if( pVal==0 ) goto no_mem;
+ sqlite3VdbeMemNumerify(pVal);
+ }
+#ifndef SQLITE_OMIT_BLOB_LITERAL
+ else if( op==TK_BLOB ){
+ int nVal;
+ assert( pExpr->u.zToken[0]=='x' || pExpr->u.zToken[0]=='X' );
+ assert( pExpr->u.zToken[1]=='\'' );
+ pVal = valueNew(db, pCtx);
+ if( !pVal ) goto no_mem;
+ zVal = &pExpr->u.zToken[2];
+ nVal = sqlite3Strlen30(zVal)-1;
+ assert( zVal[nVal]=='\'' );
+ sqlite3VdbeMemSetStr(pVal, sqlite3HexToBlob(db, zVal, nVal), nVal/2,
+ 0, SQLITE_DYNAMIC);
+ }
+#endif
+
+#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
+ else if( op==TK_FUNCTION && pCtx!=0 ){
+ rc = valueFromFunction(db, pExpr, enc, affinity, &pVal, pCtx);
+ }
+#endif
+
+ *ppVal = pVal;
+ return rc;
+
+no_mem:
+ sqlite3OomFault(db);
+ sqlite3DbFree(db, zVal);
+ assert( *ppVal==0 );
+#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
+ if( pCtx==0 ) sqlite3ValueFree(pVal);
+#else
+ assert( pCtx==0 ); sqlite3ValueFree(pVal);
+#endif
+ return SQLITE_NOMEM_BKPT;
+}
+
+/*
+** Create a new sqlite3_value object, containing the value of pExpr.
+**
+** This only works for very simple expressions that consist of one constant
+** token (i.e. "5", "5.1", "'a string'"). If the expression can
+** be converted directly into a value, then the value is allocated and
+** a pointer written to *ppVal. The caller is responsible for deallocating
+** the value by passing it to sqlite3ValueFree() later on. If the expression
+** cannot be converted to a value, then *ppVal is set to NULL.
+*/
+SQLITE_PRIVATE int sqlite3ValueFromExpr(
+ sqlite3 *db, /* The database connection */
+ Expr *pExpr, /* The expression to evaluate */
+ u8 enc, /* Encoding to use */
+ u8 affinity, /* Affinity to use */
+ sqlite3_value **ppVal /* Write the new value here */
+){
+ return pExpr ? valueFromExpr(db, pExpr, enc, affinity, ppVal, 0) : 0;
+}
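+
+/* Illustrative usage sketch (not part of the original source): convert a
+** simple literal expression into an sqlite3_value and release it when done.
+** The variables db and pExpr are assumed to be a valid connection and a
+** parsed literal expression, respectively.
+**
+**   sqlite3_value *pVal = 0;
+**   int rc = sqlite3ValueFromExpr(db, pExpr, SQLITE_UTF8, SQLITE_AFF_BLOB, &pVal);
+**   if( rc==SQLITE_OK && pVal ){
+**     printf("literal = %s\n", (const char*)sqlite3_value_text(pVal));
+**   }
+**   sqlite3ValueFree(pVal);      (the caller owns the returned value)
+*/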
+
+#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
+/*
+** The implementation of the sqlite_record() function. This function accepts
+** a single argument of any type. The return value is a formatted database
+** record (a blob) containing the argument value.
+**
+** This is used to convert the value stored in the 'sample' column of the
+** sqlite_stat3 table to the record format SQLite uses internally.
+*/
+static void recordFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ const int file_format = 1;
+ u32 iSerial; /* Serial type */
+ int nSerial; /* Bytes of space for iSerial as varint */
+ u32 nVal; /* Bytes of space required for argv[0] */
+ int nRet;
+ sqlite3 *db;
+ u8 *aRet;
+
+ UNUSED_PARAMETER( argc );
+ iSerial = sqlite3VdbeSerialType(argv[0], file_format, &nVal);
+ nSerial = sqlite3VarintLen(iSerial);
+ db = sqlite3_context_db_handle(context);
+
+ nRet = 1 + nSerial + nVal;
+ aRet = sqlite3DbMallocRawNN(db, nRet);
+ if( aRet==0 ){
+ sqlite3_result_error_nomem(context);
+ }else{
+ aRet[0] = nSerial+1;
+ putVarint32(&aRet[1], iSerial);
+ sqlite3VdbeSerialPut(&aRet[1+nSerial], argv[0], iSerial);
+ sqlite3_result_blob(context, aRet, nRet, SQLITE_TRANSIENT);
+ sqlite3DbFree(db, aRet);
+ }
+}
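+
+/* Illustrative usage sketch (not part of the original source): sqlite_record()
+** is registered internally only, and is invoked from SQL when ANALYZE data is
+** loaded, along the lines of:
+**
+**   SELECT sqlite_record(sample) FROM sqlite_stat3;
+**
+** Each row of the result is the 'sample' value re-encoded as a one-column
+** database record (a blob).
+*/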
+
+/*
+** Register built-in functions used to help read ANALYZE data.
+*/
+SQLITE_PRIVATE void sqlite3AnalyzeFunctions(void){
+ static FuncDef aAnalyzeTableFuncs[] = {
+ FUNCTION(sqlite_record, 1, 0, 0, recordFunc),
+ };
+ sqlite3InsertBuiltinFuncs(aAnalyzeTableFuncs, ArraySize(aAnalyzeTableFuncs));
+}
+
+/*
+** Attempt to extract a value from pExpr and use it to construct *ppVal.
+**
+** If pAlloc is not NULL, then an UnpackedRecord object is created for
+** pAlloc if one does not exist and the new value is added to the
+** UnpackedRecord object.
+**
+** A value is extracted in the following cases:
+**
+** * (pExpr==0). In this case the value is assumed to be an SQL NULL,
+**
+** * The expression is a bound variable, and this is a reprepare, or
+**
+** * The expression is a literal value.
+**
+** On success, *ppVal is made to point to the extracted value. The caller
+** is responsible for ensuring that the value is eventually freed.
+*/
+static int stat4ValueFromExpr(
+ Parse *pParse, /* Parse context */
+ Expr *pExpr, /* The expression to extract a value from */
+ u8 affinity, /* Affinity to use */
+ struct ValueNewStat4Ctx *pAlloc,/* How to allocate space. Or NULL */
+ sqlite3_value **ppVal /* OUT: New value object (or NULL) */
+){
+ int rc = SQLITE_OK;
+ sqlite3_value *pVal = 0;
+ sqlite3 *db = pParse->db;
+
+ /* Skip over any TK_COLLATE nodes */
+ pExpr = sqlite3ExprSkipCollate(pExpr);
+
+ if( !pExpr ){
+ pVal = valueNew(db, pAlloc);
+ if( pVal ){
+ sqlite3VdbeMemSetNull((Mem*)pVal);
+ }
+ }else if( pExpr->op==TK_VARIABLE
+ || NEVER(pExpr->op==TK_REGISTER && pExpr->op2==TK_VARIABLE)
+ ){
+ Vdbe *v;
+ int iBindVar = pExpr->iColumn;
+ sqlite3VdbeSetVarmask(pParse->pVdbe, iBindVar);
+ if( (v = pParse->pReprepare)!=0 ){
+ pVal = valueNew(db, pAlloc);
+ if( pVal ){
+ rc = sqlite3VdbeMemCopy((Mem*)pVal, &v->aVar[iBindVar-1]);
+ if( rc==SQLITE_OK ){
+ sqlite3ValueApplyAffinity(pVal, affinity, ENC(db));
+ }
+ pVal->db = pParse->db;
+ }
+ }
+ }else{
+ rc = valueFromExpr(db, pExpr, ENC(db), affinity, &pVal, pAlloc);
+ }
+
+ assert( pVal==0 || pVal->db==db );
+ *ppVal = pVal;
+ return rc;
+}
+
+/*
+** This function is used to allocate and populate UnpackedRecord
+** structures intended to be compared against sample index keys stored
+** in the sqlite_stat4 table.
+**
+** A single call to this function populates zero or more fields of the
+** record starting with field iVal (fields are numbered from left to
+** right starting with 0). A single field is populated if:
+**
+** * (pExpr==0). In this case the value is assumed to be an SQL NULL,
+**
+** * The expression is a bound variable, and this is a reprepare, or
+**
+** * The sqlite3ValueFromExpr() function is able to extract a value
+** from the expression (i.e. the expression is a literal value).
+**
+** Or, if pExpr is a TK_VECTOR, one field is populated for each of the
+** vector components that match either of the two latter criteria listed
+** above.
+**
+** Before any value is appended to the record, the affinity of the
+** corresponding column within index pIdx is applied to it. Before
+** this function returns, output parameter *pnExtract is set to the
+** number of values appended to the record.
+**
+** When this function is called, *ppRec must either point to an object
+** allocated by an earlier call to this function, or must be NULL. If it
+** is NULL and a value can be successfully extracted, a new UnpackedRecord
+** is allocated (and *ppRec set to point to it) before returning.
+**
+** Unless an error is encountered, SQLITE_OK is returned. It is not an
+** error if a value cannot be extracted from pExpr. If an error does
+** occur, an SQLite error code is returned.
+*/
+SQLITE_PRIVATE int sqlite3Stat4ProbeSetValue(
+ Parse *pParse, /* Parse context */
+ Index *pIdx, /* Index being probed */
+ UnpackedRecord **ppRec, /* IN/OUT: Probe record */
+ Expr *pExpr, /* The expression to extract a value from */
+ int nElem, /* Maximum number of values to append */
+ int iVal, /* Array element to populate */
+ int *pnExtract /* OUT: Values appended to the record */
+){
+ int rc = SQLITE_OK;
+ int nExtract = 0;
+
+ if( pExpr==0 || pExpr->op!=TK_SELECT ){
+ int i;
+ struct ValueNewStat4Ctx alloc;
+
+ alloc.pParse = pParse;
+ alloc.pIdx = pIdx;
+ alloc.ppRec = ppRec;
+
+ for(i=0; i<nElem; i++){
+ sqlite3_value *pVal = 0;
+ Expr *pElem = (pExpr ? sqlite3VectorFieldSubexpr(pExpr, i) : 0);
+ u8 aff = sqlite3IndexColumnAffinity(pParse->db, pIdx, iVal+i);
+ alloc.iVal = iVal+i;
+ rc = stat4ValueFromExpr(pParse, pElem, aff, &alloc, &pVal);
+ if( !pVal ) break;
+ nExtract++;
+ }
+ }
+
+ *pnExtract = nExtract;
+ return rc;
+}
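+
+/* Illustrative usage sketch (not part of the original source): build a probe
+** record for the left-most column of an index and release it afterwards.
+** The variables pParse, pIdx and pExpr are assumed to come from the
+** surrounding WHERE-clause analysis.
+**
+**   UnpackedRecord *pRec = 0;
+**   int nExtract = 0;
+**   int rc = sqlite3Stat4ProbeSetValue(pParse, pIdx, &pRec, pExpr, 1, 0, &nExtract);
+**   if( rc==SQLITE_OK && nExtract==1 ){
+**     ... compare pRec against sqlite_stat4 samples ...
+**   }
+**   sqlite3Stat4ProbeFree(pRec);
+*/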
+
+/*
+** Attempt to extract a value from expression pExpr using the methods
+** as described for sqlite3Stat4ProbeSetValue() above.
+**
+** If successful, set *ppVal to point to a new value object and return
+** SQLITE_OK. If no value can be extracted, but no other error occurs
+** (e.g. OOM), return SQLITE_OK and set *ppVal to NULL. Or, if an error
+** does occur, return an SQLite error code. The final value of *ppVal
+** is undefined in this case.
+*/
+SQLITE_PRIVATE int sqlite3Stat4ValueFromExpr(
+ Parse *pParse, /* Parse context */
+ Expr *pExpr, /* The expression to extract a value from */
+ u8 affinity, /* Affinity to use */
+ sqlite3_value **ppVal /* OUT: New value object (or NULL) */
+){
+ return stat4ValueFromExpr(pParse, pExpr, affinity, 0, ppVal);
+}
+
+/*
+** Extract the iCol-th column from the nRec-byte record in pRec. Write
+** the column value into *ppVal. If *ppVal is initially NULL then a new
+** sqlite3_value object is allocated.
+**
+** If *ppVal is initially NULL then the caller is responsible for
+** ensuring that the value written into *ppVal is eventually freed.
+*/
+SQLITE_PRIVATE int sqlite3Stat4Column(
+ sqlite3 *db, /* Database handle */
+ const void *pRec, /* Pointer to buffer containing record */
+ int nRec, /* Size of buffer pRec in bytes */
+ int iCol, /* Column to extract */
+ sqlite3_value **ppVal /* OUT: Extracted value */
+){
+ u32 t; /* a column type code */
+ int nHdr; /* Size of the header in the record */
+ int iHdr; /* Next unread header byte */
+ int iField; /* Next unread data byte */
+ int szField; /* Size of the current data field */
+ int i; /* Column index */
+ u8 *a = (u8*)pRec; /* Typecast byte array */
+ Mem *pMem = *ppVal; /* Write result into this Mem object */
+
+ assert( iCol>0 );
+ iHdr = getVarint32(a, nHdr);
+ if( nHdr>nRec || iHdr>=nHdr ) return SQLITE_CORRUPT_BKPT;
+ iField = nHdr;
+ for(i=0; i<=iCol; i++){
+ iHdr += getVarint32(&a[iHdr], t);
+ testcase( iHdr==nHdr );
+ testcase( iHdr==nHdr+1 );
+ if( iHdr>nHdr ) return SQLITE_CORRUPT_BKPT;
+ szField = sqlite3VdbeSerialTypeLen(t);
+ iField += szField;
+ }
+ testcase( iField==nRec );
+ testcase( iField==nRec+1 );
+ if( iField>nRec ) return SQLITE_CORRUPT_BKPT;
+ if( pMem==0 ){
+ pMem = *ppVal = sqlite3ValueNew(db);
+ if( pMem==0 ) return SQLITE_NOMEM_BKPT;
+ }
+ sqlite3VdbeSerialGet(&a[iField-szField], t, pMem);
+ pMem->enc = ENC(db);
+ return SQLITE_OK;
+}
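+
+/* Illustrative usage sketch (not part of the original source): extract the
+** second column (iCol==1) from a sample record. The buffer aSample/nSample
+** is an assumed sample record loaded from sqlite_stat4.
+**
+**   sqlite3_value *pVal = 0;
+**   int rc = sqlite3Stat4Column(db, aSample, nSample, 1, &pVal);
+**   if( rc==SQLITE_OK ){
+**     ... use pVal ...
+**   }
+**   sqlite3ValueFree(pVal);
+*/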
+
+/*
+** Unless it is NULL, the argument must be an UnpackedRecord object returned
+** by an earlier call to sqlite3Stat4ProbeSetValue(). This call deletes
+** the object.
+*/
+SQLITE_PRIVATE void sqlite3Stat4ProbeFree(UnpackedRecord *pRec){
+ if( pRec ){
+ int i;
+ int nCol = pRec->pKeyInfo->nField+pRec->pKeyInfo->nXField;
+ Mem *aMem = pRec->aMem;
+ sqlite3 *db = aMem[0].db;
+ for(i=0; i<nCol; i++){
+ sqlite3VdbeMemRelease(&aMem[i]);
+ }
+ sqlite3KeyInfoUnref(pRec->pKeyInfo);
+ sqlite3DbFree(db, pRec);
+ }
+}
+#endif /* ifdef SQLITE_ENABLE_STAT3_OR_STAT4 */
+
+/*
+** Change the string value of an sqlite3_value object
+*/
+SQLITE_PRIVATE void sqlite3ValueSetStr(
+ sqlite3_value *v, /* Value to be set */
+ int n, /* Length of string z */
+ const void *z, /* Text of the new string */
+ u8 enc, /* Encoding to use */
+ void (*xDel)(void*) /* Destructor for the string */
+){
+ if( v ) sqlite3VdbeMemSetStr((Mem *)v, z, n, enc, xDel);
+}
+
+/*
+** Free an sqlite3_value object
+*/
+SQLITE_PRIVATE void sqlite3ValueFree(sqlite3_value *v){
+ if( !v ) return;
+ sqlite3VdbeMemRelease((Mem *)v);
+ sqlite3DbFree(((Mem*)v)->db, v);
+}
+
+/*
+** The sqlite3ValueBytes() routine returns the number of bytes in the
+** sqlite3_value object assuming that it uses the encoding "enc".
+** The valueBytes() routine is a helper function.
+*/
+static SQLITE_NOINLINE int valueBytes(sqlite3_value *pVal, u8 enc){
+ return valueToText(pVal, enc)!=0 ? pVal->n : 0;
+}
+SQLITE_PRIVATE int sqlite3ValueBytes(sqlite3_value *pVal, u8 enc){
+ Mem *p = (Mem*)pVal;
+ assert( (p->flags & MEM_Null)==0 || (p->flags & (MEM_Str|MEM_Blob))==0 );
+ if( (p->flags & MEM_Str)!=0 && pVal->enc==enc ){
+ return p->n;
+ }
+ if( (p->flags & MEM_Blob)!=0 ){
+ if( p->flags & MEM_Zero ){
+ return p->n + p->u.nZero;
+ }else{
+ return p->n;
+ }
+ }
+ if( p->flags & MEM_Null ) return 0;
+ return valueBytes(pVal, enc);
+}
+
+/************** End of vdbemem.c *********************************************/
+/************** Begin file vdbeaux.c *****************************************/
+/*
+** 2003 September 6
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains code used for creating, destroying, and populating
+** a VDBE (or an "sqlite3_stmt" as it is known to the outside world.)
+*/
+/* #include "sqliteInt.h" */
+/* #include "vdbeInt.h" */
+
+/*
+** Create a new virtual database engine.
+*/
+SQLITE_PRIVATE Vdbe *sqlite3VdbeCreate(Parse *pParse){
+ sqlite3 *db = pParse->db;
+ Vdbe *p;
+ p = sqlite3DbMallocRawNN(db, sizeof(Vdbe) );
+ if( p==0 ) return 0;
+ memset(&p->aOp, 0, sizeof(Vdbe)-offsetof(Vdbe,aOp));
+ p->db = db;
+ if( db->pVdbe ){
+ db->pVdbe->pPrev = p;
+ }
+ p->pNext = db->pVdbe;
+ p->pPrev = 0;
+ db->pVdbe = p;
+ p->magic = VDBE_MAGIC_INIT;
+ p->pParse = pParse;
+ assert( pParse->aLabel==0 );
+ assert( pParse->nLabel==0 );
+ assert( pParse->nOpAlloc==0 );
+ assert( pParse->szOpAlloc==0 );
+ return p;
+}
+
+/*
+** Change the error string stored in Vdbe.zErrMsg
+*/
+SQLITE_PRIVATE void sqlite3VdbeError(Vdbe *p, const char *zFormat, ...){
+ va_list ap;
+ sqlite3DbFree(p->db, p->zErrMsg);
+ va_start(ap, zFormat);
+ p->zErrMsg = sqlite3VMPrintf(p->db, zFormat, ap);
+ va_end(ap);
+}
+
+/*
+** Remember the SQL string for a prepared statement.
+*/
+SQLITE_PRIVATE void sqlite3VdbeSetSql(Vdbe *p, const char *z, int n, int isPrepareV2){
+ assert( isPrepareV2==1 || isPrepareV2==0 );
+ if( p==0 ) return;
+#if defined(SQLITE_OMIT_TRACE) && !defined(SQLITE_ENABLE_SQLLOG)
+ if( !isPrepareV2 ) return;
+#endif
+ assert( p->zSql==0 );
+ p->zSql = sqlite3DbStrNDup(p->db, z, n);
+ p->isPrepareV2 = (u8)isPrepareV2;
+}
+
+/*
+** Swap all content between two VDBE structures.
+*/
+SQLITE_PRIVATE void sqlite3VdbeSwap(Vdbe *pA, Vdbe *pB){
+ Vdbe tmp, *pTmp;
+ char *zTmp;
+ assert( pA->db==pB->db );
+ tmp = *pA;
+ *pA = *pB;
+ *pB = tmp;
+ pTmp = pA->pNext;
+ pA->pNext = pB->pNext;
+ pB->pNext = pTmp;
+ pTmp = pA->pPrev;
+ pA->pPrev = pB->pPrev;
+ pB->pPrev = pTmp;
+ zTmp = pA->zSql;
+ pA->zSql = pB->zSql;
+ pB->zSql = zTmp;
+ pB->isPrepareV2 = pA->isPrepareV2;
+}
+
+/*
+** Resize the Vdbe.aOp array so that it is at least nOp elements larger
+** than its current size. nOp is guaranteed to be less than or equal
+** to 1024/sizeof(Op).
+**
+** If an out-of-memory error occurs while resizing the array, return
+** SQLITE_NOMEM. In this case Vdbe.aOp and Parse.nOpAlloc remain
+** unchanged (this is so that any opcodes already allocated can be
+** correctly deallocated along with the rest of the Vdbe).
+*/
+static int growOpArray(Vdbe *v, int nOp){
+ VdbeOp *pNew;
+ Parse *p = v->pParse;
+
+ /* The SQLITE_TEST_REALLOC_STRESS compile-time option is designed to force
+ ** more frequent reallocs and hence provide more opportunities for
+ ** simulated OOM faults. SQLITE_TEST_REALLOC_STRESS is generally used
+ ** during testing only. With SQLITE_TEST_REALLOC_STRESS grow the op array
+ ** by the minimum amount required until the size reaches 512. Normal
+ ** operation (without SQLITE_TEST_REALLOC_STRESS) is to double the current
+ ** size of the op array or add 1KB of space, whichever is smaller. */
+#ifdef SQLITE_TEST_REALLOC_STRESS
+ int nNew = (p->nOpAlloc>=512 ? p->nOpAlloc*2 : p->nOpAlloc+nOp);
+#else
+ int nNew = (p->nOpAlloc ? p->nOpAlloc*2 : (int)(1024/sizeof(Op)));
+ UNUSED_PARAMETER(nOp);
+#endif
+
+ assert( nOp<=(1024/sizeof(Op)) );
+ assert( nNew>=(p->nOpAlloc+nOp) );
+ pNew = sqlite3DbRealloc(p->db, v->aOp, nNew*sizeof(Op));
+ if( pNew ){
+ p->szOpAlloc = sqlite3DbMallocSize(p->db, pNew);
+ p->nOpAlloc = p->szOpAlloc/sizeof(Op);
+ v->aOp = pNew;
+ }
+ return (pNew ? SQLITE_OK : SQLITE_NOMEM_BKPT);
+}
+
+#ifdef SQLITE_DEBUG
+/* This routine is just a convenient place to set a breakpoint that will
+** fire after each opcode is inserted and displayed using
+** "PRAGMA vdbe_addoptrace=on".
+*/
+static void test_addop_breakpoint(void){
+ static int n = 0;
+ n++;
+}
+#endif
+
+/*
+** Add a new instruction to the list of instructions current in the
+** VDBE. Return the address of the new instruction.
+**
+** Parameters:
+**
+** p Pointer to the VDBE
+**
+** op The opcode for this instruction
+**
+** p1, p2, p3 Operands
+**
+** Use the sqlite3VdbeResolveLabel() function to fix an address and
+** the sqlite3VdbeChangeP4() function to change the value of the P4
+** operand.
+*/
+static SQLITE_NOINLINE int growOp3(Vdbe *p, int op, int p1, int p2, int p3){
+ assert( p->pParse->nOpAlloc<=p->nOp );
+ if( growOpArray(p, 1) ) return 1;
+ assert( p->pParse->nOpAlloc>p->nOp );
+ return sqlite3VdbeAddOp3(p, op, p1, p2, p3);
+}
+SQLITE_PRIVATE int sqlite3VdbeAddOp3(Vdbe *p, int op, int p1, int p2, int p3){
+ int i;
+ VdbeOp *pOp;
+
+ i = p->nOp;
+ assert( p->magic==VDBE_MAGIC_INIT );
+ assert( op>=0 && op<0xff );
+ if( p->pParse->nOpAlloc<=i ){
+ return growOp3(p, op, p1, p2, p3);
+ }
+ p->nOp++;
+ pOp = &p->aOp[i];
+ pOp->opcode = (u8)op;
+ pOp->p5 = 0;
+ pOp->p1 = p1;
+ pOp->p2 = p2;
+ pOp->p3 = p3;
+ pOp->p4.p = 0;
+ pOp->p4type = P4_NOTUSED;
+#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
+ pOp->zComment = 0;
+#endif
+#ifdef SQLITE_DEBUG
+ if( p->db->flags & SQLITE_VdbeAddopTrace ){
+ int jj, kk;
+ Parse *pParse = p->pParse;
+ for(jj=kk=0; jj<pParse->nColCache; jj++){
+ struct yColCache *x = pParse->aColCache + jj;
+ printf(" r[%d]={%d:%d}", x->iReg, x->iTable, x->iColumn);
+ kk++;
+ }
+ if( kk ) printf("\n");
+ sqlite3VdbePrintOp(0, i, &p->aOp[i]);
+ test_addop_breakpoint();
+ }
+#endif
+#ifdef VDBE_PROFILE
+ pOp->cycles = 0;
+ pOp->cnt = 0;
+#endif
+#ifdef SQLITE_VDBE_COVERAGE
+ pOp->iSrcLine = 0;
+#endif
+ return i;
+}
+SQLITE_PRIVATE int sqlite3VdbeAddOp0(Vdbe *p, int op){
+ return sqlite3VdbeAddOp3(p, op, 0, 0, 0);
+}
+SQLITE_PRIVATE int sqlite3VdbeAddOp1(Vdbe *p, int op, int p1){
+ return sqlite3VdbeAddOp3(p, op, p1, 0, 0);
+}
+SQLITE_PRIVATE int sqlite3VdbeAddOp2(Vdbe *p, int op, int p1, int p2){
+ return sqlite3VdbeAddOp3(p, op, p1, p2, 0);
+}
+
+/* Generate code for an unconditional jump to instruction iDest
+*/
+SQLITE_PRIVATE int sqlite3VdbeGoto(Vdbe *p, int iDest){
+ return sqlite3VdbeAddOp3(p, OP_Goto, 0, iDest, 0);
+}
+
+/* Generate code to cause the string zStr to be loaded into
+** register iDest
+*/
+SQLITE_PRIVATE int sqlite3VdbeLoadString(Vdbe *p, int iDest, const char *zStr){
+ return sqlite3VdbeAddOp4(p, OP_String8, 0, iDest, 0, zStr, 0);
+}
+
+/*
+** Generate code that initializes multiple registers to string or integer
+** constants. The registers begin with iDest and increase consecutively.
+** One register is initialized for each character in zTypes[]. For each
+** "s" character in zTypes[], the register is a string if the argument is
+** not NULL, or OP_Null if the value is a null pointer. For each "i" character
+** in zTypes[], the register is initialized to an integer.
+*/
+SQLITE_PRIVATE void sqlite3VdbeMultiLoad(Vdbe *p, int iDest, const char *zTypes, ...){
+ va_list ap;
+ int i;
+ char c;
+ va_start(ap, zTypes);
+ for(i=0; (c = zTypes[i])!=0; i++){
+ if( c=='s' ){
+ const char *z = va_arg(ap, const char*);
+ sqlite3VdbeAddOp4(p, z==0 ? OP_Null : OP_String8, 0, iDest++, 0, z, 0);
+ }else{
+ assert( c=='i' );
+ sqlite3VdbeAddOp2(p, OP_Integer, va_arg(ap, int), iDest++);
+ }
+ }
+ va_end(ap);
+}
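+
+/* Illustrative usage sketch (not part of the original source): load one
+** integer and two strings into three consecutive registers starting at
+** iReg. The values iCol, zName and zType are assumed to be in scope.
+**
+**   sqlite3VdbeMultiLoad(v, iReg, "iss", iCol, zName, zType);
+**
+** This is equivalent to one OP_Integer followed by two OP_String8
+** instructions (or OP_Null where a string argument is a null pointer).
+*/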
+
+/*
+** Add an opcode that includes the p4 value as a pointer.
+*/
+SQLITE_PRIVATE int sqlite3VdbeAddOp4(
+ Vdbe *p, /* Add the opcode to this VM */
+ int op, /* The new opcode */
+ int p1, /* The P1 operand */
+ int p2, /* The P2 operand */
+ int p3, /* The P3 operand */
+ const char *zP4, /* The P4 operand */
+ int p4type /* P4 operand type */
+){
+ int addr = sqlite3VdbeAddOp3(p, op, p1, p2, p3);
+ sqlite3VdbeChangeP4(p, addr, zP4, p4type);
+ return addr;
+}
+
+/*
+** Add an opcode that includes the p4 value with a P4_INT64 or
+** P4_REAL type.
+*/
+SQLITE_PRIVATE int sqlite3VdbeAddOp4Dup8(
+ Vdbe *p, /* Add the opcode to this VM */
+ int op, /* The new opcode */
+ int p1, /* The P1 operand */
+ int p2, /* The P2 operand */
+ int p3, /* The P3 operand */
+ const u8 *zP4, /* The P4 operand */
+ int p4type /* P4 operand type */
+){
+ char *p4copy = sqlite3DbMallocRawNN(sqlite3VdbeDb(p), 8);
+ if( p4copy ) memcpy(p4copy, zP4, 8);
+ return sqlite3VdbeAddOp4(p, op, p1, p2, p3, p4copy, p4type);
+}
+
+/*
+** Add an OP_ParseSchema opcode. This routine is broken out from
+** sqlite3VdbeAddOp4() since it also needs to mark all btrees
+** as having been used.
+**
+** The zWhere string must have been obtained from sqlite3_malloc().
+** This routine will take ownership of the allocated memory.
+*/
+SQLITE_PRIVATE void sqlite3VdbeAddParseSchemaOp(Vdbe *p, int iDb, char *zWhere){
+ int j;
+ sqlite3VdbeAddOp4(p, OP_ParseSchema, iDb, 0, 0, zWhere, P4_DYNAMIC);
+ for(j=0; j<p->db->nDb; j++) sqlite3VdbeUsesBtree(p, j);
+}
+
+/*
+** Add an opcode that includes the p4 value as an integer.
+*/
+SQLITE_PRIVATE int sqlite3VdbeAddOp4Int(
+ Vdbe *p, /* Add the opcode to this VM */
+ int op, /* The new opcode */
+ int p1, /* The P1 operand */
+ int p2, /* The P2 operand */
+ int p3, /* The P3 operand */
+ int p4 /* The P4 operand as an integer */
+){
+ int addr = sqlite3VdbeAddOp3(p, op, p1, p2, p3);
+ if( p->db->mallocFailed==0 ){
+ VdbeOp *pOp = &p->aOp[addr];
+ pOp->p4type = P4_INT32;
+ pOp->p4.i = p4;
+ }
+ return addr;
+}
+
+/* Insert the end of a co-routine
+*/
+SQLITE_PRIVATE void sqlite3VdbeEndCoroutine(Vdbe *v, int regYield){
+ sqlite3VdbeAddOp1(v, OP_EndCoroutine, regYield);
+
+ /* Clear the temporary register cache, thereby ensuring that each
+ ** co-routine has its own independent set of registers, because co-routines
+ ** might expect their registers to be preserved across an OP_Yield, and
+ ** that could cause problems if two or more co-routines are using the same
+ ** temporary register.
+ */
+ v->pParse->nTempReg = 0;
+ v->pParse->nRangeReg = 0;
+}
+
+/*
+** Create a new symbolic label for an instruction that has yet to be
+** coded. The symbolic label is really just a negative number. The
+** label can be used as the P2 value of an operation. Later, when
+** the label is resolved to a specific address, the VDBE will scan
+** through its operation list and change all values of P2 which match
+** the label into the resolved address.
+**
+** The VDBE knows that a P2 value is a label because labels are
+** always negative and P2 values are supposed to be non-negative.
+** Hence, a negative P2 value is a label that has yet to be resolved.
+**
+** Zero is returned if a malloc() fails.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMakeLabel(Vdbe *v){
+ Parse *p = v->pParse;
+ int i = p->nLabel++;
+ assert( v->magic==VDBE_MAGIC_INIT );
+ if( (i & (i-1))==0 ){
+ p->aLabel = sqlite3DbReallocOrFree(p->db, p->aLabel,
+ (i*2+1)*sizeof(p->aLabel[0]));
+ }
+ if( p->aLabel ){
+ p->aLabel[i] = -1;
+ }
+ return ADDR(i);
+}
+
+/*
+** Resolve label "x" to be the address of the next instruction to
+** be inserted. The parameter "x" must have been obtained from
+** a prior call to sqlite3VdbeMakeLabel().
+*/
+SQLITE_PRIVATE void sqlite3VdbeResolveLabel(Vdbe *v, int x){
+ Parse *p = v->pParse;
+ int j = ADDR(x);
+ assert( v->magic==VDBE_MAGIC_INIT );
+ assert( j<p->nLabel );
+ assert( j>=0 );
+ if( p->aLabel ){
+ p->aLabel[j] = v->nOp;
+ }
+}
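+
+/* Illustrative usage sketch (not part of the original source): create a
+** forward label, use it as a jump target before its address is known, then
+** resolve it. regFlag is an assumed register holding a boolean value.
+**
+**   int addrSkip = sqlite3VdbeMakeLabel(v);            (a negative number)
+**   sqlite3VdbeAddOp2(v, OP_IfNot, regFlag, addrSkip);
+**   ... code that is skipped when r[regFlag] is false ...
+**   sqlite3VdbeResolveLabel(v, addrSkip);              (label now means "here")
+*/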
+
+/*
+** Mark the VDBE as one that can only be run one time.
+*/
+SQLITE_PRIVATE void sqlite3VdbeRunOnlyOnce(Vdbe *p){
+ p->runOnlyOnce = 1;
+}
+
+/*
+** Mark the VDBE as one that can be run multiple times.
+*/
+SQLITE_PRIVATE void sqlite3VdbeReusable(Vdbe *p){
+ p->runOnlyOnce = 0;
+}
+
+#ifdef SQLITE_DEBUG /* sqlite3AssertMayAbort() logic */
+
+/*
+** The following type and function are used to iterate through all opcodes
+** in a Vdbe main program and each of the sub-programs (triggers) it may
+** invoke directly or indirectly. It should be used as follows:
+**
+** Op *pOp;
+** VdbeOpIter sIter;
+**
+** memset(&sIter, 0, sizeof(sIter));
+** sIter.v = v; // v is of type Vdbe*
+** while( (pOp = opIterNext(&sIter)) ){
+** // Do something with pOp
+** }
+** sqlite3DbFree(v->db, sIter.apSub);
+**
+*/
+typedef struct VdbeOpIter VdbeOpIter;
+struct VdbeOpIter {
+ Vdbe *v; /* Vdbe to iterate through the opcodes of */
+ SubProgram **apSub; /* Array of subprograms */
+ int nSub; /* Number of entries in apSub */
+ int iAddr; /* Address of next instruction to return */
+ int iSub; /* 0 = main program, 1 = first sub-program etc. */
+};
+static Op *opIterNext(VdbeOpIter *p){
+ Vdbe *v = p->v;
+ Op *pRet = 0;
+ Op *aOp;
+ int nOp;
+
+ if( p->iSub<=p->nSub ){
+
+ if( p->iSub==0 ){
+ aOp = v->aOp;
+ nOp = v->nOp;
+ }else{
+ aOp = p->apSub[p->iSub-1]->aOp;
+ nOp = p->apSub[p->iSub-1]->nOp;
+ }
+ assert( p->iAddr<nOp );
+
+ pRet = &aOp[p->iAddr];
+ p->iAddr++;
+ if( p->iAddr==nOp ){
+ p->iSub++;
+ p->iAddr = 0;
+ }
+
+ if( pRet->p4type==P4_SUBPROGRAM ){
+ int nByte = (p->nSub+1)*sizeof(SubProgram*);
+ int j;
+ for(j=0; j<p->nSub; j++){
+ if( p->apSub[j]==pRet->p4.pProgram ) break;
+ }
+ if( j==p->nSub ){
+ p->apSub = sqlite3DbReallocOrFree(v->db, p->apSub, nByte);
+ if( !p->apSub ){
+ pRet = 0;
+ }else{
+ p->apSub[p->nSub++] = pRet->p4.pProgram;
+ }
+ }
+ }
+ }
+
+ return pRet;
+}
+
+/*
+** Check if the program stored in the VM associated with pParse may
+** throw an ABORT exception (causing the statement, but not the entire
+** transaction, to be rolled back). This condition is true if the main program
+** or any sub-program contains any of the following:
+**
+** * OP_Halt with P1=SQLITE_CONSTRAINT and P2=OE_Abort.
+** * OP_HaltIfNull with P1=SQLITE_CONSTRAINT and P2=OE_Abort.
+** * OP_Destroy
+** * OP_VUpdate
+** * OP_VRename
+** * OP_FkCounter with P2==0 (immediate foreign key constraint)
+** * OP_CreateTable and OP_InitCoroutine (for CREATE TABLE AS SELECT ...)
+**
+** Then check that the value of Parse.mayAbort is true if an
+** ABORT may be thrown, or false otherwise. Return true if it does
+** match, or false otherwise. This function is intended to be used as
+** part of an assert statement in the compiler. Similar to:
+**
+** assert( sqlite3VdbeAssertMayAbort(pParse->pVdbe, pParse->mayAbort) );
+*/
+SQLITE_PRIVATE int sqlite3VdbeAssertMayAbort(Vdbe *v, int mayAbort){
+ int hasAbort = 0;
+ int hasFkCounter = 0;
+ int hasCreateTable = 0;
+ int hasInitCoroutine = 0;
+ Op *pOp;
+ VdbeOpIter sIter;
+ memset(&sIter, 0, sizeof(sIter));
+ sIter.v = v;
+
+ while( (pOp = opIterNext(&sIter))!=0 ){
+ int opcode = pOp->opcode;
+ if( opcode==OP_Destroy || opcode==OP_VUpdate || opcode==OP_VRename
+ || ((opcode==OP_Halt || opcode==OP_HaltIfNull)
+ && ((pOp->p1&0xff)==SQLITE_CONSTRAINT && pOp->p2==OE_Abort))
+ ){
+ hasAbort = 1;
+ break;
+ }
+ if( opcode==OP_CreateTable ) hasCreateTable = 1;
+ if( opcode==OP_InitCoroutine ) hasInitCoroutine = 1;
+#ifndef SQLITE_OMIT_FOREIGN_KEY
+ if( opcode==OP_FkCounter && pOp->p1==0 && pOp->p2==1 ){
+ hasFkCounter = 1;
+ }
+#endif
+ }
+ sqlite3DbFree(v->db, sIter.apSub);
+
+ /* Return true if hasAbort==mayAbort, or if a malloc failure occurred.
+ ** If malloc failed, then the while() loop above may not have iterated
+ ** through all opcodes and hasAbort may be set incorrectly. Return
+ ** true for this case to prevent the assert() in the caller's frame
+ ** from failing. */
+ return ( v->db->mallocFailed || hasAbort==mayAbort || hasFkCounter
+ || (hasCreateTable && hasInitCoroutine) );
+}
+#endif /* SQLITE_DEBUG - the sqlite3AssertMayAbort() function */
+
+/*
+** This routine is called after all opcodes have been inserted. It loops
+** through all the opcodes and fixes up some details.
+**
+** (1) For each jump instruction with a negative P2 value (a label)
+** resolve the P2 value to an actual address.
+**
+** (2) Compute the maximum number of arguments used by any SQL function
+** and store that value in *pMaxFuncArgs.
+**
+** (3) Update the Vdbe.readOnly and Vdbe.bIsReader flags to accurately
+** indicate what the prepared statement actually does.
+**
+** (4) Initialize the p4.xAdvance pointer on opcodes that use it.
+**
+** (5) Reclaim the memory allocated for storing labels.
+**
+** This routine will only function correctly if the mkopcodeh.tcl generator
+** script numbers the opcodes correctly. Changes to this routine must be
+** coordinated with changes to mkopcodeh.tcl.
+*/
+static void resolveP2Values(Vdbe *p, int *pMaxFuncArgs){
+ int nMaxArgs = *pMaxFuncArgs;
+ Op *pOp;
+ Parse *pParse = p->pParse;
+ int *aLabel = pParse->aLabel;
+ p->readOnly = 1;
+ p->bIsReader = 0;
+ pOp = &p->aOp[p->nOp-1];
+ while(1){
+
+ /* Only JUMP opcodes and the short list of special opcodes in the switch
+ ** below need to be considered. The mkopcodeh.tcl generator script groups
+ ** all these opcodes together near the front of the opcode list. Skip
+ ** any opcode that does not need processing by virtue of the fact that
+ ** it is larger than SQLITE_MX_JUMP_OPCODE, as a performance optimization.
+ */
+ if( pOp->opcode<=SQLITE_MX_JUMP_OPCODE ){
+ /* NOTE: Be sure to update mkopcodeh.tcl when adding or removing
+ ** cases from this switch! */
+ switch( pOp->opcode ){
+ case OP_Transaction: {
+ if( pOp->p2!=0 ) p->readOnly = 0;
+ /* fall thru */
+ }
+ case OP_AutoCommit:
+ case OP_Savepoint: {
+ p->bIsReader = 1;
+ break;
+ }
+#ifndef SQLITE_OMIT_WAL
+ case OP_Checkpoint:
+#endif
+ case OP_Vacuum:
+ case OP_JournalMode: {
+ p->readOnly = 0;
+ p->bIsReader = 1;
+ break;
+ }
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ case OP_VUpdate: {
+ if( pOp->p2>nMaxArgs ) nMaxArgs = pOp->p2;
+ break;
+ }
+ case OP_VFilter: {
+ int n;
+ assert( (pOp - p->aOp) >= 3 );
+ assert( pOp[-1].opcode==OP_Integer );
+ n = pOp[-1].p1;
+ if( n>nMaxArgs ) nMaxArgs = n;
+ break;
+ }
+#endif
+ case OP_Next:
+ case OP_NextIfOpen:
+ case OP_SorterNext: {
+ pOp->p4.xAdvance = sqlite3BtreeNext;
+ pOp->p4type = P4_ADVANCE;
+ break;
+ }
+ case OP_Prev:
+ case OP_PrevIfOpen: {
+ pOp->p4.xAdvance = sqlite3BtreePrevious;
+ pOp->p4type = P4_ADVANCE;
+ break;
+ }
+ }
+ if( (sqlite3OpcodeProperty[pOp->opcode] & OPFLG_JUMP)!=0 && pOp->p2<0 ){
+ assert( ADDR(pOp->p2)<pParse->nLabel );
+ pOp->p2 = aLabel[ADDR(pOp->p2)];
+ }
+ }
+ if( pOp==p->aOp ) break;
+ pOp--;
+ }
+ sqlite3DbFree(p->db, pParse->aLabel);
+ pParse->aLabel = 0;
+ pParse->nLabel = 0;
+ *pMaxFuncArgs = nMaxArgs;
+ assert( p->bIsReader!=0 || DbMaskAllZero(p->btreeMask) );
+}
+
+/*
+** Return the address of the next instruction to be inserted.
+*/
+SQLITE_PRIVATE int sqlite3VdbeCurrentAddr(Vdbe *p){
+ assert( p->magic==VDBE_MAGIC_INIT );
+ return p->nOp;
+}
+
+/*
+** Verify that at least N opcode slots are available in p without
+** having to malloc for more space (except when compiled using
+** SQLITE_TEST_REALLOC_STRESS). This interface is used during testing
+** to verify that certain calls to sqlite3VdbeAddOpList() can never
+** fail due to an OOM fault and hence that the return value from
+** sqlite3VdbeAddOpList() will always be non-NULL.
+*/
+#if defined(SQLITE_DEBUG) && !defined(SQLITE_TEST_REALLOC_STRESS)
+SQLITE_PRIVATE void sqlite3VdbeVerifyNoMallocRequired(Vdbe *p, int N){
+ assert( p->nOp + N <= p->pParse->nOpAlloc );
+}
+#endif
+
+/*
+** Verify that the VM passed as the only argument does not contain
+** an OP_ResultRow opcode. Fail an assert() if it does. This is used
+** by code in pragma.c to ensure that the implementation of certain
+** pragmas comports with the flags specified in the mkpragmatab.tcl
+** script.
+*/
+#if defined(SQLITE_DEBUG) && !defined(SQLITE_TEST_REALLOC_STRESS)
+SQLITE_PRIVATE void sqlite3VdbeVerifyNoResultRow(Vdbe *p){
+ int i;
+ for(i=0; i<p->nOp; i++){
+ assert( p->aOp[i].opcode!=OP_ResultRow );
+ }
+}
+#endif
+
+/*
+** This function returns a pointer to the array of opcodes associated with
+** the Vdbe passed as the first argument. It is the caller's responsibility
+** to arrange for the returned array to be eventually freed using the
+** vdbeFreeOpArray() function.
+**
+** Before returning, *pnOp is set to the number of entries in the returned
+** array. Also, *pnMaxArg is set to the larger of its current value and
+** the number of entries in the Vdbe.apArg[] array required to execute the
+** returned program.
+*/
+SQLITE_PRIVATE VdbeOp *sqlite3VdbeTakeOpArray(Vdbe *p, int *pnOp, int *pnMaxArg){
+ VdbeOp *aOp = p->aOp;
+ assert( aOp && !p->db->mallocFailed );
+
+ /* Check that sqlite3VdbeUsesBtree() was not called on this VM */
+ assert( DbMaskAllZero(p->btreeMask) );
+
+ resolveP2Values(p, pnMaxArg);
+ *pnOp = p->nOp;
+ p->aOp = 0;
+ return aOp;
+}
+
+/*
+** Add a whole list of operations to the operation stack. Return a
+** pointer to the first operation inserted.
+**
+** Non-zero P2 arguments to jump instructions are automatically adjusted
+** so that the jump target is relative to the first operation inserted.
+*/
+SQLITE_PRIVATE VdbeOp *sqlite3VdbeAddOpList(
+ Vdbe *p, /* Add opcodes to the prepared statement */
+ int nOp, /* Number of opcodes to add */
+ VdbeOpList const *aOp, /* The opcodes to be added */
+ int iLineno /* Source-file line number of first opcode */
+){
+ int i;
+ VdbeOp *pOut, *pFirst;
+ assert( nOp>0 );
+ assert( p->magic==VDBE_MAGIC_INIT );
+ if( p->nOp + nOp > p->pParse->nOpAlloc && growOpArray(p, nOp) ){
+ return 0;
+ }
+ pFirst = pOut = &p->aOp[p->nOp];
+ for(i=0; i<nOp; i++, aOp++, pOut++){
+ pOut->opcode = aOp->opcode;
+ pOut->p1 = aOp->p1;
+ pOut->p2 = aOp->p2;
+ assert( aOp->p2>=0 );
+ if( (sqlite3OpcodeProperty[aOp->opcode] & OPFLG_JUMP)!=0 && aOp->p2>0 ){
+ pOut->p2 += p->nOp;
+ }
+ pOut->p3 = aOp->p3;
+ pOut->p4type = P4_NOTUSED;
+ pOut->p4.p = 0;
+ pOut->p5 = 0;
+#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
+ pOut->zComment = 0;
+#endif
+#ifdef SQLITE_VDBE_COVERAGE
+ pOut->iSrcLine = iLineno+i;
+#else
+ (void)iLineno;
+#endif
+#ifdef SQLITE_DEBUG
+ if( p->db->flags & SQLITE_VdbeAddopTrace ){
+ sqlite3VdbePrintOp(0, i+p->nOp, &p->aOp[i+p->nOp]);
+ }
+#endif
+ }
+ p->nOp += nOp;
+ return pFirst;
+}
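+
+/* Illustrative usage sketch (not part of the original source): append a
+** small canned program from a static array. The P2 of the OP_Once entry is
+** non-zero and is a jump target, so it is automatically rebased on the
+** address of the first inserted opcode.
+**
+**   static const VdbeOpList once[] = {
+**     { OP_Once,    0, 2, 0 },   (0: skip ahead after the first pass)
+**     { OP_Integer, 1, 1, 0 },   (1: r[1] = 1, executed only once)
+**     { OP_Noop,    0, 0, 0 },   (2: jump lands here)
+**   };
+**   VdbeOp *aOp = sqlite3VdbeAddOpList(v, ArraySize(once), once, 0);
+**   if( aOp==0 ) return;         (OOM while growing the opcode array)
+*/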
+
+#if defined(SQLITE_ENABLE_STMT_SCANSTATUS)
+/*
+** Add an entry to the array of counters managed by sqlite3_stmt_scanstatus().
+*/
+SQLITE_PRIVATE void sqlite3VdbeScanStatus(
+ Vdbe *p, /* VM to add scanstatus() to */
+ int addrExplain, /* Address of OP_Explain (or 0) */
+ int addrLoop, /* Address of loop counter */
+ int addrVisit, /* Address of rows visited counter */
+ LogEst nEst, /* Estimated number of output rows */
+ const char *zName /* Name of table or index being scanned */
+){
+ int nByte = (p->nScan+1) * sizeof(ScanStatus);
+ ScanStatus *aNew;
+ aNew = (ScanStatus*)sqlite3DbRealloc(p->db, p->aScan, nByte);
+ if( aNew ){
+ ScanStatus *pNew = &aNew[p->nScan++];
+ pNew->addrExplain = addrExplain;
+ pNew->addrLoop = addrLoop;
+ pNew->addrVisit = addrVisit;
+ pNew->nEst = nEst;
+ pNew->zName = sqlite3DbStrDup(p->db, zName);
+ p->aScan = aNew;
+ }
+}
+#endif
+
+
+/*
+** Change the value of the opcode, or P1, P2, P3, or P5 operands
+** for a specific instruction.
+*/
+SQLITE_PRIVATE void sqlite3VdbeChangeOpcode(Vdbe *p, u32 addr, u8 iNewOpcode){
+ sqlite3VdbeGetOp(p,addr)->opcode = iNewOpcode;
+}
+SQLITE_PRIVATE void sqlite3VdbeChangeP1(Vdbe *p, u32 addr, int val){
+ sqlite3VdbeGetOp(p,addr)->p1 = val;
+}
+SQLITE_PRIVATE void sqlite3VdbeChangeP2(Vdbe *p, u32 addr, int val){
+ sqlite3VdbeGetOp(p,addr)->p2 = val;
+}
+SQLITE_PRIVATE void sqlite3VdbeChangeP3(Vdbe *p, u32 addr, int val){
+ sqlite3VdbeGetOp(p,addr)->p3 = val;
+}
+SQLITE_PRIVATE void sqlite3VdbeChangeP5(Vdbe *p, u16 p5){
+ assert( p->nOp>0 || p->db->mallocFailed );
+ if( p->nOp>0 ) p->aOp[p->nOp-1].p5 = p5;
+}
+
+/*
+** Change the P2 operand of instruction addr so that it points to
+** the address of the next instruction to be coded.
+*/
+SQLITE_PRIVATE void sqlite3VdbeJumpHere(Vdbe *p, int addr){
+ sqlite3VdbeChangeP2(p, addr, p->nOp);
+}
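+
+/* Illustrative usage sketch (not part of the original source): a common
+** pattern is to emit a conditional jump whose target is not yet known and
+** patch it up once the code to be skipped has been generated:
+**
+**   int addrOnce = sqlite3VdbeAddOp0(v, OP_Once);    (P2 still 0)
+**   ... generate one-time initialization code ...
+**   sqlite3VdbeJumpHere(v, addrOnce);                (P2 = next address)
+*/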
+
+
+/*
+** If the input FuncDef structure is ephemeral, then free it. If
+** the FuncDef is not ephemeral, then do nothing.
+*/
+static void freeEphemeralFunction(sqlite3 *db, FuncDef *pDef){
+ if( (pDef->funcFlags & SQLITE_FUNC_EPHEM)!=0 ){
+ sqlite3DbFree(db, pDef);
+ }
+}
+
+static void vdbeFreeOpArray(sqlite3 *, Op *, int);
+
+/*
+** Delete a P4 value if necessary.
+*/
+static SQLITE_NOINLINE void freeP4Mem(sqlite3 *db, Mem *p){
+ if( p->szMalloc ) sqlite3DbFree(db, p->zMalloc);
+ sqlite3DbFree(db, p);
+}
+static SQLITE_NOINLINE void freeP4FuncCtx(sqlite3 *db, sqlite3_context *p){
+ freeEphemeralFunction(db, p->pFunc);
+ sqlite3DbFree(db, p);
+}
+static void freeP4(sqlite3 *db, int p4type, void *p4){
+ assert( db );
+ switch( p4type ){
+ case P4_FUNCCTX: {
+ freeP4FuncCtx(db, (sqlite3_context*)p4);
+ break;
+ }
+ case P4_REAL:
+ case P4_INT64:
+ case P4_DYNAMIC:
+ case P4_INTARRAY: {
+ sqlite3DbFree(db, p4);
+ break;
+ }
+ case P4_KEYINFO: {
+ if( db->pnBytesFreed==0 ) sqlite3KeyInfoUnref((KeyInfo*)p4);
+ break;
+ }
+#ifdef SQLITE_ENABLE_CURSOR_HINTS
+ case P4_EXPR: {
+ sqlite3ExprDelete(db, (Expr*)p4);
+ break;
+ }
+#endif
+ case P4_FUNCDEF: {
+ freeEphemeralFunction(db, (FuncDef*)p4);
+ break;
+ }
+ case P4_MEM: {
+ if( db->pnBytesFreed==0 ){
+ sqlite3ValueFree((sqlite3_value*)p4);
+ }else{
+ freeP4Mem(db, (Mem*)p4);
+ }
+ break;
+ }
+ case P4_VTAB : {
+ if( db->pnBytesFreed==0 ) sqlite3VtabUnlock((VTable *)p4);
+ break;
+ }
+ }
+}
+
+/*
+** Free the space allocated for aOp and any p4 values allocated for the
+** opcodes contained within. If aOp is not NULL it is assumed to contain
+** nOp entries.
+*/
+static void vdbeFreeOpArray(sqlite3 *db, Op *aOp, int nOp){
+ if( aOp ){
+ Op *pOp;
+ for(pOp=aOp; pOp<&aOp[nOp]; pOp++){
+ if( pOp->p4type ) freeP4(db, pOp->p4type, pOp->p4.p);
+#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
+ sqlite3DbFree(db, pOp->zComment);
+#endif
+ }
+ }
+ sqlite3DbFree(db, aOp);
+}
+
+/*
+** Link the SubProgram object passed as the second argument into the linked
+** list at Vdbe.pSubProgram. This list is used to delete all sub-program
+** objects when the VM is no longer required.
+*/
+SQLITE_PRIVATE void sqlite3VdbeLinkSubProgram(Vdbe *pVdbe, SubProgram *p){
+ p->pNext = pVdbe->pProgram;
+ pVdbe->pProgram = p;
+}
+
+/*
+** Change the opcode at addr into OP_Noop
+*/
+SQLITE_PRIVATE int sqlite3VdbeChangeToNoop(Vdbe *p, int addr){
+ VdbeOp *pOp;
+ if( p->db->mallocFailed ) return 0;
+ assert( addr>=0 && addr<p->nOp );
+ pOp = &p->aOp[addr];
+ freeP4(p->db, pOp->p4type, pOp->p4.p);
+ pOp->p4type = P4_NOTUSED;
+ pOp->p4.z = 0;
+ pOp->opcode = OP_Noop;
+ return 1;
+}
+
+/*
+** If the last opcode is "op" and it is not a jump destination,
+** then remove it. Return true if and only if an opcode was removed.
+*/
+SQLITE_PRIVATE int sqlite3VdbeDeletePriorOpcode(Vdbe *p, u8 op){
+ if( p->nOp>0 && p->aOp[p->nOp-1].opcode==op ){
+ return sqlite3VdbeChangeToNoop(p, p->nOp-1);
+ }else{
+ return 0;
+ }
+}
+
+/*
+** Change the value of the P4 operand for a specific instruction.
+** This routine is useful when a large program is loaded from a
+** static array using sqlite3VdbeAddOpList but we want to make a
+** few minor changes to the program.
+**
+** If n>=0 then the P4 operand is dynamic, meaning that a copy of
+** the string is made into memory obtained from sqlite3_malloc().
+** A value of n==0 means copy bytes of zP4 up to and including the
+** first null byte. If n>0 then copy n+1 bytes of zP4.
+**
+** Other values of n (P4_STATIC, P4_COLLSEQ etc.) indicate that zP4 points
+** to a string or structure that is guaranteed to exist for the lifetime of
+** the Vdbe. In these cases we can just copy the pointer.
+**
+** If addr<0 then change P4 on the most recently inserted instruction.
+*/
+static void SQLITE_NOINLINE vdbeChangeP4Full(
+ Vdbe *p,
+ Op *pOp,
+ const char *zP4,
+ int n
+){
+ if( pOp->p4type ){
+ freeP4(p->db, pOp->p4type, pOp->p4.p);
+ pOp->p4type = 0;
+ pOp->p4.p = 0;
+ }
+ if( n<0 ){
+ sqlite3VdbeChangeP4(p, (int)(pOp - p->aOp), zP4, n);
+ }else{
+ if( n==0 ) n = sqlite3Strlen30(zP4);
+ pOp->p4.z = sqlite3DbStrNDup(p->db, zP4, n);
+ pOp->p4type = P4_DYNAMIC;
+ }
+}
+SQLITE_PRIVATE void sqlite3VdbeChangeP4(Vdbe *p, int addr, const char *zP4, int n){
+ Op *pOp;
+ sqlite3 *db;
+ assert( p!=0 );
+ db = p->db;
+ assert( p->magic==VDBE_MAGIC_INIT );
+ assert( p->aOp!=0 || db->mallocFailed );
+ if( db->mallocFailed ){
+ if( n!=P4_VTAB ) freeP4(db, n, (void*)*(char**)&zP4);
+ return;
+ }
+ assert( p->nOp>0 );
+ assert( addr<p->nOp );
+ if( addr<0 ){
+ addr = p->nOp - 1;
+ }
+ pOp = &p->aOp[addr];
+ if( n>=0 || pOp->p4type ){
+ vdbeChangeP4Full(p, pOp, zP4, n);
+ return;
+ }
+ if( n==P4_INT32 ){
+ /* Note: this cast is safe, because the origin data point was an int
+ ** that was cast to a (const char *). */
+ pOp->p4.i = SQLITE_PTR_TO_INT(zP4);
+ pOp->p4type = P4_INT32;
+ }else if( zP4!=0 ){
+ assert( n<0 );
+ pOp->p4.p = (void*)zP4;
+ pOp->p4type = (signed char)n;
+ if( n==P4_VTAB ) sqlite3VtabLock((VTable*)zP4);
+ }
+}
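+
+/* Illustrative usage sketch (not part of the original source): with n==0 the
+** string is copied into memory owned by the Vdbe (P4_DYNAMIC); with a
+** negative type code the pointer itself is stored. zTabName and pColl are
+** assumed to be in scope.
+**
+**   sqlite3VdbeChangeP4(v, addr, zTabName, 0);              (copy the string)
+**   sqlite3VdbeChangeP4(v, -1, (char*)pColl, P4_COLLSEQ);   (latest opcode,
+**                                                            store pointer)
+*/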
+
+/*
+** Change the P4 operand of the most recently coded instruction
+** to the value defined by the arguments. This is a high-speed
+** version of sqlite3VdbeChangeP4().
+**
+** The P4 operand must not have been previously defined. And the new
+** P4 must not be P4_INT32. Use sqlite3VdbeChangeP4() in either of
+** those cases.
+*/
+SQLITE_PRIVATE void sqlite3VdbeAppendP4(Vdbe *p, void *pP4, int n){
+ VdbeOp *pOp;
+ assert( n!=P4_INT32 && n!=P4_VTAB );
+ assert( n<=0 );
+ if( p->db->mallocFailed ){
+ freeP4(p->db, n, pP4);
+ }else{
+ assert( pP4!=0 );
+ assert( p->nOp>0 );
+ pOp = &p->aOp[p->nOp-1];
+ assert( pOp->p4type==P4_NOTUSED );
+ pOp->p4type = n;
+ pOp->p4.p = pP4;
+ }
+}
+
+/*
+** Set the P4 on the most recently added opcode to the KeyInfo for the
+** index given.
+*/
+SQLITE_PRIVATE void sqlite3VdbeSetP4KeyInfo(Parse *pParse, Index *pIdx){
+ Vdbe *v = pParse->pVdbe;
+ KeyInfo *pKeyInfo;
+ assert( v!=0 );
+ assert( pIdx!=0 );
+ pKeyInfo = sqlite3KeyInfoOfIndex(pParse, pIdx);
+ if( pKeyInfo ) sqlite3VdbeAppendP4(v, pKeyInfo, P4_KEYINFO);
+}
+
+#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
+/*
+** Change the comment on the most recently coded instruction. Or
+** insert a No-op and add the comment to that new instruction. This
+** makes the code easier to read during debugging. None of this happens
+** in a production build.
+*/
+static void vdbeVComment(Vdbe *p, const char *zFormat, va_list ap){
+ assert( p->nOp>0 || p->aOp==0 );
+ assert( p->aOp==0 || p->aOp[p->nOp-1].zComment==0 || p->db->mallocFailed );
+ if( p->nOp ){
+ assert( p->aOp );
+ sqlite3DbFree(p->db, p->aOp[p->nOp-1].zComment);
+ p->aOp[p->nOp-1].zComment = sqlite3VMPrintf(p->db, zFormat, ap);
+ }
+}
+SQLITE_PRIVATE void sqlite3VdbeComment(Vdbe *p, const char *zFormat, ...){
+ va_list ap;
+ if( p ){
+ va_start(ap, zFormat);
+ vdbeVComment(p, zFormat, ap);
+ va_end(ap);
+ }
+}
+SQLITE_PRIVATE void sqlite3VdbeNoopComment(Vdbe *p, const char *zFormat, ...){
+ va_list ap;
+ if( p ){
+ sqlite3VdbeAddOp0(p, OP_Noop);
+ va_start(ap, zFormat);
+ vdbeVComment(p, zFormat, ap);
+ va_end(ap);
+ }
+}
+#endif /* SQLITE_ENABLE_EXPLAIN_COMMENTS */
+
+#ifdef SQLITE_VDBE_COVERAGE
+/*
+** Set the value of the iSrcLine field for the previously coded instruction.
+*/
+SQLITE_PRIVATE void sqlite3VdbeSetLineNumber(Vdbe *v, int iLine){
+ sqlite3VdbeGetOp(v,-1)->iSrcLine = iLine;
+}
+#endif /* SQLITE_VDBE_COVERAGE */
+
+/*
+** Return the opcode for a given address. If the address is -1, then
+** return the most recently inserted opcode.
+**
+** If a memory allocation error has occurred prior to the calling of this
+** routine, then a pointer to a dummy VdbeOp will be returned. That opcode
+** is readable but not writable, though it is cast to a writable value.
+** The return of a dummy opcode allows the call to continue functioning
+** after an OOM fault without having to check to see if the return from
+** this routine is a valid pointer. But because the dummy.opcode is 0,
+** dummy will never be written to. This is verified by code inspection and
+** by running with Valgrind.
+*/
+SQLITE_PRIVATE VdbeOp *sqlite3VdbeGetOp(Vdbe *p, int addr){
+ /* C89 specifies that the constant "dummy" will be initialized to all
+ ** zeros, which is correct. MSVC generates a warning, nevertheless. */
+ static VdbeOp dummy; /* Ignore the MSVC warning about no initializer */
+ assert( p->magic==VDBE_MAGIC_INIT );
+ if( addr<0 ){
+ addr = p->nOp - 1;
+ }
+ assert( (addr>=0 && addr<p->nOp) || p->db->mallocFailed );
+ if( p->db->mallocFailed ){
+ return (VdbeOp*)&dummy;
+ }else{
+ return &p->aOp[addr];
+ }
+}
+
+#if defined(SQLITE_ENABLE_EXPLAIN_COMMENTS)
+/*
+** Return an integer value for one of the parameters to the opcode pOp
+** determined by character c.
+*/
+static int translateP(char c, const Op *pOp){
+ if( c=='1' ) return pOp->p1;
+ if( c=='2' ) return pOp->p2;
+ if( c=='3' ) return pOp->p3;
+ if( c=='4' ) return pOp->p4.i;
+ return pOp->p5;
+}
+
+/*
+** Compute a string for the "comment" field of a VDBE opcode listing.
+**
+** The Synopsis: field in comments in the vdbe.c source file gets converted
+** to an extra string that is appended to the sqlite3OpcodeName(). In the
+** absence of other comments, this synopsis becomes the comment on the opcode.
+** Some translation occurs:
+**
+** "PX" -> "r[X]"
+** "PX@PY" -> "r[X..X+Y-1]" or "r[x]" if y is 0 or 1
+** "PX@PY+1" -> "r[X..X+Y]" or "r[x]" if y is 0
+** "PY..PY" -> "r[X..Y]" or "r[x]" if y<=x
+*/
+static int displayComment(
+ const Op *pOp, /* The opcode to be commented */
+ const char *zP4, /* Previously obtained value for P4 */
+ char *zTemp, /* Write result here */
+ int nTemp /* Space available in zTemp[] */
+){
+ const char *zOpName;
+ const char *zSynopsis;
+ int nOpName;
+ int ii, jj;
+ char zAlt[50];
+ zOpName = sqlite3OpcodeName(pOp->opcode);
+ nOpName = sqlite3Strlen30(zOpName);
+ if( zOpName[nOpName+1] ){
+ int seenCom = 0;
+ char c;
+ zSynopsis = zOpName += nOpName + 1;
+ if( strncmp(zSynopsis,"IF ",3)==0 ){
+ if( pOp->p5 & SQLITE_STOREP2 ){
+ sqlite3_snprintf(sizeof(zAlt), zAlt, "r[P2] = (%s)", zSynopsis+3);
+ }else{
+ sqlite3_snprintf(sizeof(zAlt), zAlt, "if %s goto P2", zSynopsis+3);
+ }
+ zSynopsis = zAlt;
+ }
+ for(ii=jj=0; jj<nTemp-1 && (c = zSynopsis[ii])!=0; ii++){
+ if( c=='P' ){
+ c = zSynopsis[++ii];
+ if( c=='4' ){
+ sqlite3_snprintf(nTemp-jj, zTemp+jj, "%s", zP4);
+ }else if( c=='X' ){
+ sqlite3_snprintf(nTemp-jj, zTemp+jj, "%s", pOp->zComment);
+ seenCom = 1;
+ }else{
+ int v1 = translateP(c, pOp);
+ int v2;
+ sqlite3_snprintf(nTemp-jj, zTemp+jj, "%d", v1);
+ if( strncmp(zSynopsis+ii+1, "@P", 2)==0 ){
+ ii += 3;
+ jj += sqlite3Strlen30(zTemp+jj);
+ v2 = translateP(zSynopsis[ii], pOp);
+ if( strncmp(zSynopsis+ii+1,"+1",2)==0 ){
+ ii += 2;
+ v2++;
+ }
+ if( v2>1 ){
+ sqlite3_snprintf(nTemp-jj, zTemp+jj, "..%d", v1+v2-1);
+ }
+ }else if( strncmp(zSynopsis+ii+1, "..P3", 4)==0 && pOp->p3==0 ){
+ ii += 4;
+ }
+ }
+ jj += sqlite3Strlen30(zTemp+jj);
+ }else{
+ zTemp[jj++] = c;
+ }
+ }
+ if( !seenCom && jj<nTemp-5 && pOp->zComment ){
+ sqlite3_snprintf(nTemp-jj, zTemp+jj, "; %s", pOp->zComment);
+ jj += sqlite3Strlen30(zTemp+jj);
+ }
+ if( jj<nTemp ) zTemp[jj] = 0;
+ }else if( pOp->zComment ){
+ sqlite3_snprintf(nTemp, zTemp, "%s", pOp->zComment);
+ jj = sqlite3Strlen30(zTemp);
+ }else{
+ zTemp[0] = 0;
+ jj = 0;
+ }
+ return jj;
+}
+#endif /* SQLITE_ENABLE_EXPLAIN_COMMENTS */
+
+#if VDBE_DISPLAY_P4 && defined(SQLITE_ENABLE_CURSOR_HINTS)
+/*
+** Translate the P4.pExpr value for an OP_CursorHint opcode into text
+** that can be displayed in the P4 column of EXPLAIN output.
+*/
+static void displayP4Expr(StrAccum *p, Expr *pExpr){
+ const char *zOp = 0;
+ switch( pExpr->op ){
+ case TK_STRING:
+ sqlite3XPrintf(p, "%Q", pExpr->u.zToken);
+ break;
+ case TK_INTEGER:
+ sqlite3XPrintf(p, "%d", pExpr->u.iValue);
+ break;
+ case TK_NULL:
+ sqlite3XPrintf(p, "NULL");
+ break;
+ case TK_REGISTER: {
+ sqlite3XPrintf(p, "r[%d]", pExpr->iTable);
+ break;
+ }
+ case TK_COLUMN: {
+ if( pExpr->iColumn<0 ){
+ sqlite3XPrintf(p, "rowid");
+ }else{
+ sqlite3XPrintf(p, "c%d", (int)pExpr->iColumn);
+ }
+ break;
+ }
+ case TK_LT: zOp = "LT"; break;
+ case TK_LE: zOp = "LE"; break;
+ case TK_GT: zOp = "GT"; break;
+ case TK_GE: zOp = "GE"; break;
+ case TK_NE: zOp = "NE"; break;
+ case TK_EQ: zOp = "EQ"; break;
+ case TK_IS: zOp = "IS"; break;
+ case TK_ISNOT: zOp = "ISNOT"; break;
+ case TK_AND: zOp = "AND"; break;
+ case TK_OR: zOp = "OR"; break;
+ case TK_PLUS: zOp = "ADD"; break;
+ case TK_STAR: zOp = "MUL"; break;
+ case TK_MINUS: zOp = "SUB"; break;
+ case TK_REM: zOp = "REM"; break;
+ case TK_BITAND: zOp = "BITAND"; break;
+ case TK_BITOR: zOp = "BITOR"; break;
+ case TK_SLASH: zOp = "DIV"; break;
+ case TK_LSHIFT: zOp = "LSHIFT"; break;
+ case TK_RSHIFT: zOp = "RSHIFT"; break;
+ case TK_CONCAT: zOp = "CONCAT"; break;
+ case TK_UMINUS: zOp = "MINUS"; break;
+ case TK_UPLUS: zOp = "PLUS"; break;
+ case TK_BITNOT: zOp = "BITNOT"; break;
+ case TK_NOT: zOp = "NOT"; break;
+ case TK_ISNULL: zOp = "ISNULL"; break;
+ case TK_NOTNULL: zOp = "NOTNULL"; break;
+
+ default:
+ sqlite3XPrintf(p, "%s", "expr");
+ break;
+ }
+
+ if( zOp ){
+ sqlite3XPrintf(p, "%s(", zOp);
+ displayP4Expr(p, pExpr->pLeft);
+ if( pExpr->pRight ){
+ sqlite3StrAccumAppend(p, ",", 1);
+ displayP4Expr(p, pExpr->pRight);
+ }
+ sqlite3StrAccumAppend(p, ")", 1);
+ }
+}
+#endif /* VDBE_DISPLAY_P4 && defined(SQLITE_ENABLE_CURSOR_HINTS) */
+
+
+#if VDBE_DISPLAY_P4
+/*
+** Compute a string that describes the P4 parameter for an opcode.
+** Use zTemp for any required temporary buffer space.
+*/
+static char *displayP4(Op *pOp, char *zTemp, int nTemp){
+ char *zP4 = zTemp;
+ StrAccum x;
+ assert( nTemp>=20 );
+ sqlite3StrAccumInit(&x, 0, zTemp, nTemp, 0);
+ switch( pOp->p4type ){
+ case P4_KEYINFO: {
+ int j;
+ KeyInfo *pKeyInfo = pOp->p4.pKeyInfo;
+ assert( pKeyInfo->aSortOrder!=0 );
+ sqlite3XPrintf(&x, "k(%d", pKeyInfo->nField);
+ for(j=0; j<pKeyInfo->nField; j++){
+ CollSeq *pColl = pKeyInfo->aColl[j];
+ const char *zColl = pColl ? pColl->zName : "";
+ if( strcmp(zColl, "BINARY")==0 ) zColl = "B";
+ sqlite3XPrintf(&x, ",%s%s", pKeyInfo->aSortOrder[j] ? "-" : "", zColl);
+ }
+ sqlite3StrAccumAppend(&x, ")", 1);
+ break;
+ }
+#ifdef SQLITE_ENABLE_CURSOR_HINTS
+ case P4_EXPR: {
+ displayP4Expr(&x, pOp->p4.pExpr);
+ break;
+ }
+#endif
+ case P4_COLLSEQ: {
+ CollSeq *pColl = pOp->p4.pColl;
+ sqlite3XPrintf(&x, "(%.20s)", pColl->zName);
+ break;
+ }
+ case P4_FUNCDEF: {
+ FuncDef *pDef = pOp->p4.pFunc;
+ sqlite3XPrintf(&x, "%s(%d)", pDef->zName, pDef->nArg);
+ break;
+ }
+#if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE)
+ case P4_FUNCCTX: {
+ FuncDef *pDef = pOp->p4.pCtx->pFunc;
+ sqlite3XPrintf(&x, "%s(%d)", pDef->zName, pDef->nArg);
+ break;
+ }
+#endif
+ case P4_INT64: {
+ sqlite3XPrintf(&x, "%lld", *pOp->p4.pI64);
+ break;
+ }
+ case P4_INT32: {
+ sqlite3XPrintf(&x, "%d", pOp->p4.i);
+ break;
+ }
+ case P4_REAL: {
+ sqlite3XPrintf(&x, "%.16g", *pOp->p4.pReal);
+ break;
+ }
+ case P4_MEM: {
+ Mem *pMem = pOp->p4.pMem;
+ if( pMem->flags & MEM_Str ){
+ zP4 = pMem->z;
+ }else if( pMem->flags & MEM_Int ){
+ sqlite3XPrintf(&x, "%lld", pMem->u.i);
+ }else if( pMem->flags & MEM_Real ){
+ sqlite3XPrintf(&x, "%.16g", pMem->u.r);
+ }else if( pMem->flags & MEM_Null ){
+ zP4 = "NULL";
+ }else{
+ assert( pMem->flags & MEM_Blob );
+ zP4 = "(blob)";
+ }
+ break;
+ }
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ case P4_VTAB: {
+ sqlite3_vtab *pVtab = pOp->p4.pVtab->pVtab;
+ sqlite3XPrintf(&x, "vtab:%p", pVtab);
+ break;
+ }
+#endif
+ case P4_INTARRAY: {
+ int i;
+ int *ai = pOp->p4.ai;
+ int n = ai[0]; /* The first element of an INTARRAY is always the
+ ** count of the number of elements to follow */
+ for(i=1; i<n; i++){
+ sqlite3XPrintf(&x, ",%d", ai[i]);
+ }
+ zTemp[0] = '[';
+ sqlite3StrAccumAppend(&x, "]", 1);
+ break;
+ }
+ case P4_SUBPROGRAM: {
+ sqlite3XPrintf(&x, "program");
+ break;
+ }
+ case P4_ADVANCE: {
+ zTemp[0] = 0;
+ break;
+ }
+ case P4_TABLE: {
+ sqlite3XPrintf(&x, "%s", pOp->p4.pTab->zName);
+ break;
+ }
+ default: {
+ zP4 = pOp->p4.z;
+ if( zP4==0 ){
+ zP4 = zTemp;
+ zTemp[0] = 0;
+ }
+ }
+ }
+ sqlite3StrAccumFinish(&x);
+ assert( zP4!=0 );
+ return zP4;
+}
+#endif /* VDBE_DISPLAY_P4 */
+
+/*
+** Declare to the Vdbe that the BTree object at db->aDb[i] is used.
+**
+** The prepared statements need to know in advance the complete set of
+** attached databases that will be used. A mask of these databases
+** is maintained in p->btreeMask. The p->lockMask value is the subset of
+** p->btreeMask of databases that will require a lock.
+*/
+SQLITE_PRIVATE void sqlite3VdbeUsesBtree(Vdbe *p, int i){
+ assert( i>=0 && i<p->db->nDb && i<(int)sizeof(yDbMask)*8 );
+ assert( i<(int)sizeof(p->btreeMask)*8 );
+ DbMaskSet(p->btreeMask, i);
+ if( i!=1 && sqlite3BtreeSharable(p->db->aDb[i].pBt) ){
+ DbMaskSet(p->lockMask, i);
+ }
+}
+
+#if !defined(SQLITE_OMIT_SHARED_CACHE)
+/*
+** If SQLite is compiled to support shared-cache mode and to be threadsafe,
+** this routine obtains the mutex associated with each BtShared structure
+** that may be accessed by the VM passed as an argument. In doing so it also
+** sets the BtShared.db member of each of the BtShared structures, ensuring
+** that the correct busy-handler callback is invoked if required.
+**
+** If SQLite is not threadsafe but does support shared-cache mode, then
+** sqlite3BtreeEnter() is invoked to set the BtShared.db variables
+** of all of BtShared structures accessible via the database handle
+** associated with the VM.
+**
+** If SQLite is not threadsafe and does not support shared-cache mode, this
+** function is a no-op.
+**
+** The p->btreeMask field is a bitmask of all btrees that the prepared
+** statement p will ever use. Let N be the number of bits in p->btreeMask
+** corresponding to btrees that use shared cache. Then the runtime of
+** this routine is N*N. But as N is rarely more than 1, this should not
+** be a problem.
+*/
+SQLITE_PRIVATE void sqlite3VdbeEnter(Vdbe *p){
+ int i;
+ sqlite3 *db;
+ Db *aDb;
+ int nDb;
+ if( DbMaskAllZero(p->lockMask) ) return; /* The common case */
+ db = p->db;
+ aDb = db->aDb;
+ nDb = db->nDb;
+ for(i=0; i<nDb; i++){
+ if( i!=1 && DbMaskTest(p->lockMask,i) && ALWAYS(aDb[i].pBt!=0) ){
+ sqlite3BtreeEnter(aDb[i].pBt);
+ }
+ }
+}
+#endif
+
+#if !defined(SQLITE_OMIT_SHARED_CACHE) && SQLITE_THREADSAFE>0
+/*
+** Unlock all of the btrees previously locked by a call to sqlite3VdbeEnter().
+*/
+static SQLITE_NOINLINE void vdbeLeave(Vdbe *p){
+ int i;
+ sqlite3 *db;
+ Db *aDb;
+ int nDb;
+ db = p->db;
+ aDb = db->aDb;
+ nDb = db->nDb;
+ for(i=0; i<nDb; i++){
+ if( i!=1 && DbMaskTest(p->lockMask,i) && ALWAYS(aDb[i].pBt!=0) ){
+ sqlite3BtreeLeave(aDb[i].pBt);
+ }
+ }
+}
+SQLITE_PRIVATE void sqlite3VdbeLeave(Vdbe *p){
+ if( DbMaskAllZero(p->lockMask) ) return; /* The common case */
+ vdbeLeave(p);
+}
+#endif
+
+#if defined(VDBE_PROFILE) || defined(SQLITE_DEBUG)
+/*
+** Print a single opcode. This routine is used for debugging only.
+*/
+SQLITE_PRIVATE void sqlite3VdbePrintOp(FILE *pOut, int pc, Op *pOp){
+ char *zP4;
+ char zPtr[50];
+ char zCom[100];
+ static const char *zFormat1 = "%4d %-13s %4d %4d %4d %-13s %.2X %s\n";
+ if( pOut==0 ) pOut = stdout;
+ zP4 = displayP4(pOp, zPtr, sizeof(zPtr));
+#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
+ displayComment(pOp, zP4, zCom, sizeof(zCom));
+#else
+ zCom[0] = 0;
+#endif
+ /* NB: The sqlite3OpcodeName() function is implemented by code created
+ ** by the mkopcodeh.tcl and mkopcodec.tcl scripts which extract the
+ ** information from the vdbe.c source text */
+ fprintf(pOut, zFormat1, pc,
+ sqlite3OpcodeName(pOp->opcode), pOp->p1, pOp->p2, pOp->p3, zP4, pOp->p5,
+ zCom
+ );
+ fflush(pOut);
+}
+#endif
+
+/*
+** Initialize an array of N Mem elements.
+*/
+static void initMemArray(Mem *p, int N, sqlite3 *db, u16 flags){
+ while( (N--)>0 ){
+ p->db = db;
+ p->flags = flags;
+ p->szMalloc = 0;
+#ifdef SQLITE_DEBUG
+ p->pScopyFrom = 0;
+#endif
+ p++;
+ }
+}
+
+/*
+** Release an array of N Mem elements
+*/
+static void releaseMemArray(Mem *p, int N){
+ if( p && N ){
+ Mem *pEnd = &p[N];
+ sqlite3 *db = p->db;
+ if( db->pnBytesFreed ){
+ do{
+ if( p->szMalloc ) sqlite3DbFree(db, p->zMalloc);
+ }while( (++p)<pEnd );
+ return;
+ }
+ do{
+ assert( (&p[1])==pEnd || p[0].db==p[1].db );
+ assert( sqlite3VdbeCheckMemInvariants(p) );
+
+ /* This block is really an inlined version of sqlite3VdbeMemRelease()
+ ** that takes advantage of the fact that the memory cell value is
+ ** being set to NULL after releasing any dynamic resources.
+ **
+ ** The justification for duplicating code is that according to
+ ** callgrind, this causes a certain test case to hit the CPU 4.7
+ ** percent less (x86 linux, gcc version 4.1.2, -O6) than if
+ ** sqlite3MemRelease() were called from here. With -O2, this jumps
+ ** to 6.6 percent. The test case is inserting 1000 rows into a table
+ ** with no indexes using a single prepared INSERT statement, bind()
+ ** and reset(). Inserts are grouped into a transaction.
+ */
+ testcase( p->flags & MEM_Agg );
+ testcase( p->flags & MEM_Dyn );
+ testcase( p->flags & MEM_Frame );
+ testcase( p->flags & MEM_RowSet );
+ if( p->flags&(MEM_Agg|MEM_Dyn|MEM_Frame|MEM_RowSet) ){
+ sqlite3VdbeMemRelease(p);
+ }else if( p->szMalloc ){
+ sqlite3DbFree(db, p->zMalloc);
+ p->szMalloc = 0;
+ }
+
+ p->flags = MEM_Undefined;
+ }while( (++p)<pEnd );
+ }
+}
+
+/*
+** Delete a VdbeFrame object and its contents. VdbeFrame objects are
+** allocated by the OP_Program opcode in sqlite3VdbeExec().
+*/
+SQLITE_PRIVATE void sqlite3VdbeFrameDelete(VdbeFrame *p){
+ int i;
+ Mem *aMem = VdbeFrameMem(p);
+ VdbeCursor **apCsr = (VdbeCursor **)&aMem[p->nChildMem];
+ for(i=0; i<p->nChildCsr; i++){
+ sqlite3VdbeFreeCursor(p->v, apCsr[i]);
+ }
+ releaseMemArray(aMem, p->nChildMem);
+ sqlite3VdbeDeleteAuxData(p->v->db, &p->pAuxData, -1, 0);
+ sqlite3DbFree(p->v->db, p);
+}
+
+#ifndef SQLITE_OMIT_EXPLAIN
+/*
+** Give a listing of the program in the virtual machine.
+**
+** The interface is the same as sqlite3VdbeExec(). But instead of
+** running the code, it invokes the callback once for each instruction.
+** This feature is used to implement "EXPLAIN".
+**
+** When p->explain==1, each instruction is listed. When
+** p->explain==2, only OP_Explain instructions are listed and these
+** are shown in a different format. p->explain==2 is used to implement
+** EXPLAIN QUERY PLAN.
+**
+** When p->explain==1, the main program is listed first, then each of
+** the trigger subprograms is listed in turn.
+*/
+SQLITE_PRIVATE int sqlite3VdbeList(
+ Vdbe *p /* The VDBE */
+){
+ int nRow; /* Stop when row count reaches this */
+ int nSub = 0; /* Number of sub-vdbes seen so far */
+ SubProgram **apSub = 0; /* Array of sub-vdbes */
+  Mem *pSub = 0;             /* Memory cell holding array of subprograms */
+ sqlite3 *db = p->db; /* The database connection */
+ int i; /* Loop counter */
+ int rc = SQLITE_OK; /* Return code */
+ Mem *pMem = &p->aMem[1]; /* First Mem of result set */
+
+ assert( p->explain );
+ assert( p->magic==VDBE_MAGIC_RUN );
+ assert( p->rc==SQLITE_OK || p->rc==SQLITE_BUSY || p->rc==SQLITE_NOMEM );
+
+ /* Even though this opcode does not use dynamic strings for
+ ** the result, result columns may become dynamic if the user calls
+ ** sqlite3_column_text16(), causing a translation to UTF-16 encoding.
+ */
+ releaseMemArray(pMem, 8);
+ p->pResultSet = 0;
+
+ if( p->rc==SQLITE_NOMEM_BKPT ){
+ /* This happens if a malloc() inside a call to sqlite3_column_text() or
+ ** sqlite3_column_text16() failed. */
+ sqlite3OomFault(db);
+ return SQLITE_ERROR;
+ }
+
+ /* When the number of output rows reaches nRow, that means the
+ ** listing has finished and sqlite3_step() should return SQLITE_DONE.
+ ** nRow is the sum of the number of rows in the main program, plus
+ ** the sum of the number of rows in all trigger subprograms encountered
+ ** so far. The nRow value will increase as new trigger subprograms are
+ ** encountered, but p->pc will eventually catch up to nRow.
+ */
+ nRow = p->nOp;
+ if( p->explain==1 ){
+ /* The first 8 memory cells are used for the result set. So we will
+ ** commandeer the 9th cell to use as storage for an array of pointers
+ ** to trigger subprograms. The VDBE is guaranteed to have at least 9
+ ** cells. */
+ assert( p->nMem>9 );
+ pSub = &p->aMem[9];
+ if( pSub->flags&MEM_Blob ){
+ /* On the first call to sqlite3_step(), pSub will hold a NULL. It is
+ ** initialized to a BLOB by the P4_SUBPROGRAM processing logic below */
+ nSub = pSub->n/sizeof(Vdbe*);
+ apSub = (SubProgram **)pSub->z;
+ }
+ for(i=0; i<nSub; i++){
+ nRow += apSub[i]->nOp;
+ }
+ }
+
+ do{
+ i = p->pc++;
+ }while( i<nRow && p->explain==2 && p->aOp[i].opcode!=OP_Explain );
+ if( i>=nRow ){
+ p->rc = SQLITE_OK;
+ rc = SQLITE_DONE;
+ }else if( db->u1.isInterrupted ){
+ p->rc = SQLITE_INTERRUPT;
+ rc = SQLITE_ERROR;
+ sqlite3VdbeError(p, sqlite3ErrStr(p->rc));
+ }else{
+ char *zP4;
+ Op *pOp;
+ if( i<p->nOp ){
+ /* The output line number is small enough that we are still in the
+ ** main program. */
+ pOp = &p->aOp[i];
+ }else{
+ /* We are currently listing subprograms. Figure out which one and
+ ** pick up the appropriate opcode. */
+ int j;
+ i -= p->nOp;
+ for(j=0; i>=apSub[j]->nOp; j++){
+ i -= apSub[j]->nOp;
+ }
+ pOp = &apSub[j]->aOp[i];
+ }
+ if( p->explain==1 ){
+ pMem->flags = MEM_Int;
+ pMem->u.i = i; /* Program counter */
+ pMem++;
+
+ pMem->flags = MEM_Static|MEM_Str|MEM_Term;
+ pMem->z = (char*)sqlite3OpcodeName(pOp->opcode); /* Opcode */
+ assert( pMem->z!=0 );
+ pMem->n = sqlite3Strlen30(pMem->z);
+ pMem->enc = SQLITE_UTF8;
+ pMem++;
+
+      /* When an OP_Program opcode is encountered (the only opcode that has
+      ** a P4_SUBPROGRAM argument), expand the array of subprograms kept in
+      ** p->aMem[9].z to hold the new program - assuming this subprogram
+      ** has not already been seen.
+ */
+ if( pOp->p4type==P4_SUBPROGRAM ){
+ int nByte = (nSub+1)*sizeof(SubProgram*);
+ int j;
+ for(j=0; j<nSub; j++){
+ if( apSub[j]==pOp->p4.pProgram ) break;
+ }
+ if( j==nSub && SQLITE_OK==sqlite3VdbeMemGrow(pSub, nByte, nSub!=0) ){
+ apSub = (SubProgram **)pSub->z;
+ apSub[nSub++] = pOp->p4.pProgram;
+ pSub->flags |= MEM_Blob;
+ pSub->n = nSub*sizeof(SubProgram*);
+ }
+ }
+ }
+
+ pMem->flags = MEM_Int;
+ pMem->u.i = pOp->p1; /* P1 */
+ pMem++;
+
+ pMem->flags = MEM_Int;
+ pMem->u.i = pOp->p2; /* P2 */
+ pMem++;
+
+ pMem->flags = MEM_Int;
+ pMem->u.i = pOp->p3; /* P3 */
+ pMem++;
+
+ if( sqlite3VdbeMemClearAndResize(pMem, 100) ){ /* P4 */
+ assert( p->db->mallocFailed );
+ return SQLITE_ERROR;
+ }
+ pMem->flags = MEM_Str|MEM_Term;
+ zP4 = displayP4(pOp, pMem->z, pMem->szMalloc);
+ if( zP4!=pMem->z ){
+ pMem->n = 0;
+ sqlite3VdbeMemSetStr(pMem, zP4, -1, SQLITE_UTF8, 0);
+ }else{
+ assert( pMem->z!=0 );
+ pMem->n = sqlite3Strlen30(pMem->z);
+ pMem->enc = SQLITE_UTF8;
+ }
+ pMem++;
+
+ if( p->explain==1 ){
+ if( sqlite3VdbeMemClearAndResize(pMem, 4) ){
+ assert( p->db->mallocFailed );
+ return SQLITE_ERROR;
+ }
+ pMem->flags = MEM_Str|MEM_Term;
+ pMem->n = 2;
+ sqlite3_snprintf(3, pMem->z, "%.2x", pOp->p5); /* P5 */
+ pMem->enc = SQLITE_UTF8;
+ pMem++;
+
+#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
+ if( sqlite3VdbeMemClearAndResize(pMem, 500) ){
+ assert( p->db->mallocFailed );
+ return SQLITE_ERROR;
+ }
+ pMem->flags = MEM_Str|MEM_Term;
+ pMem->n = displayComment(pOp, zP4, pMem->z, 500);
+ pMem->enc = SQLITE_UTF8;
+#else
+ pMem->flags = MEM_Null; /* Comment */
+#endif
+ }
+
+ p->nResColumn = 8 - 4*(p->explain-1);
+ p->pResultSet = &p->aMem[1];
+ p->rc = SQLITE_OK;
+ rc = SQLITE_ROW;
+ }
+ return rc;
+}
+#endif /* SQLITE_OMIT_EXPLAIN */
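+
+/*
+** Illustrative sketch: the listing produced by sqlite3VdbeList() reaches
+** the application through the ordinary prepared-statement interfaces.
+** With p->explain==1 the eight result columns are addr, opcode, p1, p2,
+** p3, p4, p5 and comment, so a program might dump a query plan like this
+** (assuming db is an open connection that contains table t1):
+**
+**     sqlite3_stmt *pStmt;
+**     if( sqlite3_prepare_v2(db, "EXPLAIN SELECT * FROM t1", -1,
+**                            &pStmt, 0)==SQLITE_OK ){
+**       while( sqlite3_step(pStmt)==SQLITE_ROW ){
+**         printf("%4d %-13s %4d %4d %4d %s\n",
+**             sqlite3_column_int(pStmt, 0),
+**             (const char*)sqlite3_column_text(pStmt, 1),
+**             sqlite3_column_int(pStmt, 2),
+**             sqlite3_column_int(pStmt, 3),
+**             sqlite3_column_int(pStmt, 4),
+**             (const char*)sqlite3_column_text(pStmt, 5));
+**       }
+**       sqlite3_finalize(pStmt);
+**     }
+*/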
+
+#ifdef SQLITE_DEBUG
+/*
+** Print the SQL that was used to generate a VDBE program.
+*/
+SQLITE_PRIVATE void sqlite3VdbePrintSql(Vdbe *p){
+ const char *z = 0;
+ if( p->zSql ){
+ z = p->zSql;
+ }else if( p->nOp>=1 ){
+ const VdbeOp *pOp = &p->aOp[0];
+ if( pOp->opcode==OP_Init && pOp->p4.z!=0 ){
+ z = pOp->p4.z;
+ while( sqlite3Isspace(*z) ) z++;
+ }
+ }
+ if( z ) printf("SQL: [%s]\n", z);
+}
+#endif
+
+#if !defined(SQLITE_OMIT_TRACE) && defined(SQLITE_ENABLE_IOTRACE)
+/*
+** Print an IOTRACE message showing SQL content.
+*/
+SQLITE_PRIVATE void sqlite3VdbeIOTraceSql(Vdbe *p){
+ int nOp = p->nOp;
+ VdbeOp *pOp;
+ if( sqlite3IoTrace==0 ) return;
+ if( nOp<1 ) return;
+ pOp = &p->aOp[0];
+ if( pOp->opcode==OP_Init && pOp->p4.z!=0 ){
+ int i, j;
+ char z[1000];
+ sqlite3_snprintf(sizeof(z), z, "%s", pOp->p4.z);
+ for(i=0; sqlite3Isspace(z[i]); i++){}
+ for(j=0; z[i]; i++){
+ if( sqlite3Isspace(z[i]) ){
+ if( z[i-1]!=' ' ){
+ z[j++] = ' ';
+ }
+ }else{
+ z[j++] = z[i];
+ }
+ }
+ z[j] = 0;
+ sqlite3IoTrace("SQL %s\n", z);
+ }
+}
+#endif /* !SQLITE_OMIT_TRACE && SQLITE_ENABLE_IOTRACE */
+
+/* An instance of this object describes bulk memory available for use
+** by subcomponents of a prepared statement. Space is allocated out
+** of a ReusableSpace object by the allocSpace() routine below.
+*/
+struct ReusableSpace {
+ u8 *pSpace; /* Available memory */
+ int nFree; /* Bytes of available memory */
+ int nNeeded; /* Total bytes that could not be allocated */
+};
+
+/* Try to allocate nByte bytes of 8-byte aligned bulk memory for pBuf
+** from the ReusableSpace object. Return a pointer to the allocated
+** memory on success. If insufficient memory is available in the
+** ReusableSpace object, increase the ReusableSpace.nNeeded
+** value by the amount needed and return NULL.
+**
+** If pBuf is not initially NULL, that means that the memory has already
+** been allocated by a prior call to this routine, so just return a copy
+** of pBuf and leave ReusableSpace unchanged.
+**
+** This allocator is employed to repurpose unused slots at the end of the
+** opcode array of a prepared statement to satisfy other memory needs of
+** that same prepared statement.
+*/
+static void *allocSpace(
+ struct ReusableSpace *p, /* Bulk memory available for allocation */
+ void *pBuf, /* Pointer to a prior allocation */
+ int nByte /* Bytes of memory needed */
+){
+ assert( EIGHT_BYTE_ALIGNMENT(p->pSpace) );
+ if( pBuf==0 ){
+ nByte = ROUND8(nByte);
+ if( nByte <= p->nFree ){
+ p->nFree -= nByte;
+ pBuf = &p->pSpace[p->nFree];
+ }else{
+ p->nNeeded += nByte;
+ }
+ }
+ assert( EIGHT_BYTE_ALIGNMENT(pBuf) );
+ return pBuf;
+}
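+
+/*
+** A worked example of the allocator above, using hypothetical numbers:
+** suppose pParse->szOpAlloc is 1024 bytes and the opcode array itself
+** occupies 900 bytes.  Then ROUND8(900)==904, so x.nFree starts out as
+** ROUNDDOWN8(1024-904)==120.  A request for 100 bytes is rounded up to
+** 104, fits, and is carved off the end of the buffer, leaving x.nFree at
+** 16.  A later request for 50 bytes (rounded up to 56) does not fit, so
+** x.nNeeded grows by 56 and NULL is returned; the caller then repeats the
+** whole sequence of requests against a fresh allocation of x.nNeeded
+** bytes, as sqlite3VdbeMakeReady() does below.
+*/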
+
+/*
+** Rewind the VDBE back to the beginning in preparation for
+** running it.
+*/
+SQLITE_PRIVATE void sqlite3VdbeRewind(Vdbe *p){
+#if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE)
+ int i;
+#endif
+ assert( p!=0 );
+ assert( p->magic==VDBE_MAGIC_INIT || p->magic==VDBE_MAGIC_RESET );
+
+ /* There should be at least one opcode.
+ */
+ assert( p->nOp>0 );
+
+ /* Set the magic to VDBE_MAGIC_RUN sooner rather than later. */
+ p->magic = VDBE_MAGIC_RUN;
+
+#ifdef SQLITE_DEBUG
+ for(i=0; i<p->nMem; i++){
+ assert( p->aMem[i].db==p->db );
+ }
+#endif
+ p->pc = -1;
+ p->rc = SQLITE_OK;
+ p->errorAction = OE_Abort;
+ p->nChange = 0;
+ p->cacheCtr = 1;
+ p->minWriteFileFormat = 255;
+ p->iStatement = 0;
+ p->nFkConstraint = 0;
+#ifdef VDBE_PROFILE
+ for(i=0; i<p->nOp; i++){
+ p->aOp[i].cnt = 0;
+ p->aOp[i].cycles = 0;
+ }
+#endif
+}
+
+/*
+** Prepare a virtual machine for execution for the first time after
+** creating the virtual machine. This involves things such
+** as allocating registers and initializing the program counter.
+** After the VDBE has been prepped, it can be executed by one or more
+** calls to sqlite3VdbeExec().
+**
+** This function may be called exactly once on each virtual machine.
+** After this routine is called the VM has been "packaged" and is ready
+** to run. After this routine is called, further calls to
+** sqlite3VdbeAddOp() functions are prohibited. This routine disconnects
+** the Vdbe from the Parse object that helped generate it so that the
+** Vdbe becomes an independent entity and the Parse object can be
+** destroyed.
+**
+** Use the sqlite3VdbeRewind() procedure to restore a virtual machine back
+** to its initial state after it has been run.
+*/
+SQLITE_PRIVATE void sqlite3VdbeMakeReady(
+ Vdbe *p, /* The VDBE */
+ Parse *pParse /* Parsing context */
+){
+ sqlite3 *db; /* The database connection */
+ int nVar; /* Number of parameters */
+ int nMem; /* Number of VM memory registers */
+ int nCursor; /* Number of cursors required */
+ int nArg; /* Number of arguments in subprograms */
+ int n; /* Loop counter */
+ struct ReusableSpace x; /* Reusable bulk memory */
+
+ assert( p!=0 );
+ assert( p->nOp>0 );
+ assert( pParse!=0 );
+ assert( p->magic==VDBE_MAGIC_INIT );
+ assert( pParse==p->pParse );
+ db = p->db;
+ assert( db->mallocFailed==0 );
+ nVar = pParse->nVar;
+ nMem = pParse->nMem;
+ nCursor = pParse->nTab;
+ nArg = pParse->nMaxArg;
+
+ /* Each cursor uses a memory cell. The first cursor (cursor 0) can
+ ** use aMem[0] which is not otherwise used by the VDBE program. Allocate
+ ** space at the end of aMem[] for cursors 1 and greater.
+ ** See also: allocateCursor().
+ */
+ nMem += nCursor;
+ if( nCursor==0 && nMem>0 ) nMem++; /* Space for aMem[0] even if not used */
+
+ /* Figure out how much reusable memory is available at the end of the
+ ** opcode array. This extra memory will be reallocated for other elements
+ ** of the prepared statement.
+ */
+ n = ROUND8(sizeof(Op)*p->nOp); /* Bytes of opcode memory used */
+ x.pSpace = &((u8*)p->aOp)[n]; /* Unused opcode memory */
+ assert( EIGHT_BYTE_ALIGNMENT(x.pSpace) );
+ x.nFree = ROUNDDOWN8(pParse->szOpAlloc - n); /* Bytes of unused memory */
+ assert( x.nFree>=0 );
+ assert( EIGHT_BYTE_ALIGNMENT(&x.pSpace[x.nFree]) );
+
+ resolveP2Values(p, &nArg);
+ p->usesStmtJournal = (u8)(pParse->isMultiWrite && pParse->mayAbort);
+ if( pParse->explain && nMem<10 ){
+ nMem = 10;
+ }
+ p->expired = 0;
+
+  /* Memory for registers, parameters, cursors, etc. is allocated in one or two
+ ** passes. On the first pass, we try to reuse unused memory at the
+ ** end of the opcode array. If we are unable to satisfy all memory
+ ** requirements by reusing the opcode array tail, then the second
+ ** pass will fill in the remainder using a fresh memory allocation.
+ **
+  ** This two-pass approach reuses as much memory as possible from the
+  ** leftover memory at the end of the opcode array, which can significantly
+  ** reduce the amount of memory held by a prepared statement.
+ */
+ do {
+ x.nNeeded = 0;
+ p->aMem = allocSpace(&x, p->aMem, nMem*sizeof(Mem));
+ p->aVar = allocSpace(&x, p->aVar, nVar*sizeof(Mem));
+ p->apArg = allocSpace(&x, p->apArg, nArg*sizeof(Mem*));
+ p->apCsr = allocSpace(&x, p->apCsr, nCursor*sizeof(VdbeCursor*));
+#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
+ p->anExec = allocSpace(&x, p->anExec, p->nOp*sizeof(i64));
+#endif
+ if( x.nNeeded==0 ) break;
+ x.pSpace = p->pFree = sqlite3DbMallocRawNN(db, x.nNeeded);
+ x.nFree = x.nNeeded;
+ }while( !db->mallocFailed );
+
+ p->pVList = pParse->pVList;
+ pParse->pVList = 0;
+ p->explain = pParse->explain;
+ if( db->mallocFailed ){
+ p->nVar = 0;
+ p->nCursor = 0;
+ p->nMem = 0;
+ }else{
+ p->nCursor = nCursor;
+ p->nVar = (ynVar)nVar;
+ initMemArray(p->aVar, nVar, db, MEM_Null);
+ p->nMem = nMem;
+ initMemArray(p->aMem, nMem, db, MEM_Undefined);
+ memset(p->apCsr, 0, nCursor*sizeof(VdbeCursor*));
+#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
+ memset(p->anExec, 0, p->nOp*sizeof(i64));
+#endif
+ }
+ sqlite3VdbeRewind(p);
+}
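+
+/*
+** A rough mapping of the lifecycle above onto the public API, as an
+** illustrative sketch (db is assumed to be an open connection):
+**
+**     sqlite3_stmt *pStmt;
+**     sqlite3_prepare_v2(db, "SELECT 1", -1, &pStmt, 0);
+**     while( sqlite3_step(pStmt)==SQLITE_ROW ){}
+**     sqlite3_reset(pStmt);
+**     sqlite3_finalize(pStmt);
+**
+** sqlite3_prepare_v2() ends by running sqlite3VdbeMakeReady(), which as
+** shown above finishes with a call to sqlite3VdbeRewind().  sqlite3_reset()
+** maps to sqlite3VdbeReset() followed by another rewind, and
+** sqlite3_finalize() maps to sqlite3VdbeFinalize().
+*/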
+
+/*
+** Close a VDBE cursor and release all the resources that cursor
+** happens to hold.
+*/
+SQLITE_PRIVATE void sqlite3VdbeFreeCursor(Vdbe *p, VdbeCursor *pCx){
+ if( pCx==0 ){
+ return;
+ }
+ assert( pCx->pBtx==0 || pCx->eCurType==CURTYPE_BTREE );
+ switch( pCx->eCurType ){
+ case CURTYPE_SORTER: {
+ sqlite3VdbeSorterClose(p->db, pCx);
+ break;
+ }
+ case CURTYPE_BTREE: {
+ if( pCx->pBtx ){
+ sqlite3BtreeClose(pCx->pBtx);
+        /* The pCx->uc.pCursor, if it exists, will be closed automatically
+        ** by the call to sqlite3BtreeClose() above. */
+ }else{
+ assert( pCx->uc.pCursor!=0 );
+ sqlite3BtreeCloseCursor(pCx->uc.pCursor);
+ }
+ break;
+ }
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ case CURTYPE_VTAB: {
+ sqlite3_vtab_cursor *pVCur = pCx->uc.pVCur;
+ const sqlite3_module *pModule = pVCur->pVtab->pModule;
+ assert( pVCur->pVtab->nRef>0 );
+ pVCur->pVtab->nRef--;
+ pModule->xClose(pVCur);
+ break;
+ }
+#endif
+ }
+}
+
+/*
+** Close all cursors in the current frame.
+*/
+static void closeCursorsInFrame(Vdbe *p){
+ if( p->apCsr ){
+ int i;
+ for(i=0; i<p->nCursor; i++){
+ VdbeCursor *pC = p->apCsr[i];
+ if( pC ){
+ sqlite3VdbeFreeCursor(p, pC);
+ p->apCsr[i] = 0;
+ }
+ }
+ }
+}
+
+/*
+** Copy the values stored in the VdbeFrame structure to its Vdbe. This
+** is used, for example, when a trigger sub-program is halted to restore
+** control to the main program.
+*/
+SQLITE_PRIVATE int sqlite3VdbeFrameRestore(VdbeFrame *pFrame){
+ Vdbe *v = pFrame->v;
+ closeCursorsInFrame(v);
+#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
+ v->anExec = pFrame->anExec;
+#endif
+ v->aOp = pFrame->aOp;
+ v->nOp = pFrame->nOp;
+ v->aMem = pFrame->aMem;
+ v->nMem = pFrame->nMem;
+ v->apCsr = pFrame->apCsr;
+ v->nCursor = pFrame->nCursor;
+ v->db->lastRowid = pFrame->lastRowid;
+ v->nChange = pFrame->nChange;
+ v->db->nChange = pFrame->nDbChange;
+ sqlite3VdbeDeleteAuxData(v->db, &v->pAuxData, -1, 0);
+ v->pAuxData = pFrame->pAuxData;
+ pFrame->pAuxData = 0;
+ return pFrame->pc;
+}
+
+/*
+** Close all cursors.
+**
+** Also release any dynamic memory held by the VM in the Vdbe.aMem memory
+** cell array. This is necessary as the memory cell array may contain
+** pointers to VdbeFrame objects, which may in turn contain pointers to
+** open cursors.
+*/
+static void closeAllCursors(Vdbe *p){
+ if( p->pFrame ){
+ VdbeFrame *pFrame;
+ for(pFrame=p->pFrame; pFrame->pParent; pFrame=pFrame->pParent);
+ sqlite3VdbeFrameRestore(pFrame);
+ p->pFrame = 0;
+ p->nFrame = 0;
+ }
+ assert( p->nFrame==0 );
+ closeCursorsInFrame(p);
+ if( p->aMem ){
+ releaseMemArray(p->aMem, p->nMem);
+ }
+ while( p->pDelFrame ){
+ VdbeFrame *pDel = p->pDelFrame;
+ p->pDelFrame = pDel->pParent;
+ sqlite3VdbeFrameDelete(pDel);
+ }
+
+ /* Delete any auxdata allocations made by the VM */
+ if( p->pAuxData ) sqlite3VdbeDeleteAuxData(p->db, &p->pAuxData, -1, 0);
+ assert( p->pAuxData==0 );
+}
+
+/*
+** Clean up the VM after a single run.
+*/
+static void Cleanup(Vdbe *p){
+ sqlite3 *db = p->db;
+
+#ifdef SQLITE_DEBUG
+ /* Execute assert() statements to ensure that the Vdbe.apCsr[] and
+ ** Vdbe.aMem[] arrays have already been cleaned up. */
+ int i;
+ if( p->apCsr ) for(i=0; i<p->nCursor; i++) assert( p->apCsr[i]==0 );
+ if( p->aMem ){
+ for(i=0; i<p->nMem; i++) assert( p->aMem[i].flags==MEM_Undefined );
+ }
+#endif
+
+ sqlite3DbFree(db, p->zErrMsg);
+ p->zErrMsg = 0;
+ p->pResultSet = 0;
+}
+
+/*
+** Set the number of result columns that will be returned by this SQL
+** statement. This is now set at compile time, rather than during
+** execution of the vdbe program so that sqlite3_column_count() can
+** be called on an SQL statement before sqlite3_step().
+*/
+SQLITE_PRIVATE void sqlite3VdbeSetNumCols(Vdbe *p, int nResColumn){
+ Mem *pColName;
+ int n;
+ sqlite3 *db = p->db;
+
+ releaseMemArray(p->aColName, p->nResColumn*COLNAME_N);
+ sqlite3DbFree(db, p->aColName);
+ n = nResColumn*COLNAME_N;
+ p->nResColumn = (u16)nResColumn;
+ p->aColName = pColName = (Mem*)sqlite3DbMallocRawNN(db, sizeof(Mem)*n );
+ if( p->aColName==0 ) return;
+ initMemArray(p->aColName, n, p->db, MEM_Null);
+}
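+
+/*
+** Because the number of result columns and their names are fixed at
+** compile time, an application can inspect the shape of a result set
+** before the statement is ever stepped.  A minimal sketch (assuming
+** pStmt was prepared successfully):
+**
+**     int i, n = sqlite3_column_count(pStmt);
+**     for(i=0; i<n; i++){
+**       printf("column %d: %s\n", i, sqlite3_column_name(pStmt, i));
+**     }
+*/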
+
+/*
+** Set the name of the idx'th column to be returned by the SQL statement.
+** zName must be a pointer to a nul terminated string.
+**
+** This call must be made after a call to sqlite3VdbeSetNumCols().
+**
+** The final parameter, xDel, must be one of SQLITE_DYNAMIC, SQLITE_STATIC
+** or SQLITE_TRANSIENT. If it is SQLITE_DYNAMIC, then the buffer pointed
+** to by zName will be freed by sqlite3DbFree() when the vdbe is destroyed.
+*/
+SQLITE_PRIVATE int sqlite3VdbeSetColName(
+ Vdbe *p, /* Vdbe being configured */
+ int idx, /* Index of column zName applies to */
+ int var, /* One of the COLNAME_* constants */
+ const char *zName, /* Pointer to buffer containing name */
+ void (*xDel)(void*) /* Memory management strategy for zName */
+){
+ int rc;
+ Mem *pColName;
+ assert( idx<p->nResColumn );
+ assert( var<COLNAME_N );
+ if( p->db->mallocFailed ){
+ assert( !zName || xDel!=SQLITE_DYNAMIC );
+ return SQLITE_NOMEM_BKPT;
+ }
+ assert( p->aColName!=0 );
+ pColName = &(p->aColName[idx+var*p->nResColumn]);
+ rc = sqlite3VdbeMemSetStr(pColName, zName, -1, SQLITE_UTF8, xDel);
+ assert( rc!=0 || !zName || (pColName->flags&MEM_Term)!=0 );
+ return rc;
+}
+
+/*
+** A read or write transaction may or may not be active on database handle
+** db. If a transaction is active, commit it. If there is a
+** write-transaction spanning more than one database file, this routine
+** takes care of the master journal trickery.
+*/
+static int vdbeCommit(sqlite3 *db, Vdbe *p){
+ int i;
+ int nTrans = 0; /* Number of databases with an active write-transaction
+ ** that are candidates for a two-phase commit using a
+ ** master-journal */
+ int rc = SQLITE_OK;
+ int needXcommit = 0;
+
+#ifdef SQLITE_OMIT_VIRTUALTABLE
+ /* With this option, sqlite3VtabSync() is defined to be simply
+ ** SQLITE_OK so p is not used.
+ */
+ UNUSED_PARAMETER(p);
+#endif
+
+ /* Before doing anything else, call the xSync() callback for any
+ ** virtual module tables written in this transaction. This has to
+ ** be done before determining whether a master journal file is
+ ** required, as an xSync() callback may add an attached database
+ ** to the transaction.
+ */
+ rc = sqlite3VtabSync(db, p);
+
+ /* This loop determines (a) if the commit hook should be invoked and
+ ** (b) how many database files have open write transactions, not
+ ** including the temp database. (b) is important because if more than
+ ** one database file has an open write transaction, a master journal
+ ** file is required for an atomic commit.
+ */
+ for(i=0; rc==SQLITE_OK && i<db->nDb; i++){
+ Btree *pBt = db->aDb[i].pBt;
+ if( sqlite3BtreeIsInTrans(pBt) ){
+ /* Whether or not a database might need a master journal depends upon
+ ** its journal mode (among other things). This matrix determines which
+ ** journal modes use a master journal and which do not */
+ static const u8 aMJNeeded[] = {
+ /* DELETE */ 1,
+ /* PERSIST */ 1,
+ /* OFF */ 0,
+ /* TRUNCATE */ 1,
+ /* MEMORY */ 0,
+ /* WAL */ 0
+ };
+ Pager *pPager; /* Pager associated with pBt */
+ needXcommit = 1;
+ sqlite3BtreeEnter(pBt);
+ pPager = sqlite3BtreePager(pBt);
+ if( db->aDb[i].safety_level!=PAGER_SYNCHRONOUS_OFF
+ && aMJNeeded[sqlite3PagerGetJournalMode(pPager)]
+ ){
+ assert( i!=1 );
+ nTrans++;
+ }
+ rc = sqlite3PagerExclusiveLock(pPager);
+ sqlite3BtreeLeave(pBt);
+ }
+ }
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+
+ /* If there are any write-transactions at all, invoke the commit hook */
+ if( needXcommit && db->xCommitCallback ){
+ rc = db->xCommitCallback(db->pCommitArg);
+ if( rc ){
+ return SQLITE_CONSTRAINT_COMMITHOOK;
+ }
+ }
+
+ /* The simple case - no more than one database file (not counting the
+ ** TEMP database) has a transaction active. There is no need for the
+ ** master-journal.
+ **
+ ** If the return value of sqlite3BtreeGetFilename() is a zero length
+ ** string, it means the main database is :memory: or a temp file. In
+ ** that case we do not support atomic multi-file commits, so use the
+ ** simple case then too.
+ */
+ if( 0==sqlite3Strlen30(sqlite3BtreeGetFilename(db->aDb[0].pBt))
+ || nTrans<=1
+ ){
+ for(i=0; rc==SQLITE_OK && i<db->nDb; i++){
+ Btree *pBt = db->aDb[i].pBt;
+ if( pBt ){
+ rc = sqlite3BtreeCommitPhaseOne(pBt, 0);
+ }
+ }
+
+ /* Do the commit only if all databases successfully complete phase 1.
+ ** If one of the BtreeCommitPhaseOne() calls fails, this indicates an
+ ** IO error while deleting or truncating a journal file. It is unlikely,
+ ** but could happen. In this case abandon processing and return the error.
+ */
+ for(i=0; rc==SQLITE_OK && i<db->nDb; i++){
+ Btree *pBt = db->aDb[i].pBt;
+ if( pBt ){
+ rc = sqlite3BtreeCommitPhaseTwo(pBt, 0);
+ }
+ }
+ if( rc==SQLITE_OK ){
+ sqlite3VtabCommit(db);
+ }
+ }
+
+ /* The complex case - There is a multi-file write-transaction active.
+ ** This requires a master journal file to ensure the transaction is
+ ** committed atomically.
+ */
+#ifndef SQLITE_OMIT_DISKIO
+ else{
+ sqlite3_vfs *pVfs = db->pVfs;
+ char *zMaster = 0; /* File-name for the master journal */
+ char const *zMainFile = sqlite3BtreeGetFilename(db->aDb[0].pBt);
+ sqlite3_file *pMaster = 0;
+ i64 offset = 0;
+ int res;
+ int retryCount = 0;
+ int nMainFile;
+
+ /* Select a master journal file name */
+ nMainFile = sqlite3Strlen30(zMainFile);
+ zMaster = sqlite3MPrintf(db, "%s-mjXXXXXX9XXz", zMainFile);
+ if( zMaster==0 ) return SQLITE_NOMEM_BKPT;
+ do {
+ u32 iRandom;
+ if( retryCount ){
+ if( retryCount>100 ){
+ sqlite3_log(SQLITE_FULL, "MJ delete: %s", zMaster);
+ sqlite3OsDelete(pVfs, zMaster, 0);
+ break;
+ }else if( retryCount==1 ){
+ sqlite3_log(SQLITE_FULL, "MJ collide: %s", zMaster);
+ }
+ }
+ retryCount++;
+ sqlite3_randomness(sizeof(iRandom), &iRandom);
+ sqlite3_snprintf(13, &zMaster[nMainFile], "-mj%06X9%02X",
+ (iRandom>>8)&0xffffff, iRandom&0xff);
+      /* The antepenultimate character of the master journal name must
+      ** be "9" to avoid name collisions when using 8+3 filenames. */
+ assert( zMaster[sqlite3Strlen30(zMaster)-3]=='9' );
+ sqlite3FileSuffix3(zMainFile, zMaster);
+ rc = sqlite3OsAccess(pVfs, zMaster, SQLITE_ACCESS_EXISTS, &res);
+ }while( rc==SQLITE_OK && res );
+ if( rc==SQLITE_OK ){
+ /* Open the master journal. */
+ rc = sqlite3OsOpenMalloc(pVfs, zMaster, &pMaster,
+ SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE|
+ SQLITE_OPEN_EXCLUSIVE|SQLITE_OPEN_MASTER_JOURNAL, 0
+ );
+ }
+ if( rc!=SQLITE_OK ){
+ sqlite3DbFree(db, zMaster);
+ return rc;
+ }
+
+ /* Write the name of each database file in the transaction into the new
+ ** master journal file. If an error occurs at this point close
+ ** and delete the master journal file. All the individual journal files
+ ** still have 'null' as the master journal pointer, so they will roll
+ ** back independently if a failure occurs.
+ */
+ for(i=0; i<db->nDb; i++){
+ Btree *pBt = db->aDb[i].pBt;
+ if( sqlite3BtreeIsInTrans(pBt) ){
+ char const *zFile = sqlite3BtreeGetJournalname(pBt);
+ if( zFile==0 ){
+ continue; /* Ignore TEMP and :memory: databases */
+ }
+ assert( zFile[0]!=0 );
+ rc = sqlite3OsWrite(pMaster, zFile, sqlite3Strlen30(zFile)+1, offset);
+ offset += sqlite3Strlen30(zFile)+1;
+ if( rc!=SQLITE_OK ){
+ sqlite3OsCloseFree(pMaster);
+ sqlite3OsDelete(pVfs, zMaster, 0);
+ sqlite3DbFree(db, zMaster);
+ return rc;
+ }
+ }
+ }
+
+ /* Sync the master journal file. If the IOCAP_SEQUENTIAL device
+ ** flag is set this is not required.
+ */
+ if( 0==(sqlite3OsDeviceCharacteristics(pMaster)&SQLITE_IOCAP_SEQUENTIAL)
+ && SQLITE_OK!=(rc = sqlite3OsSync(pMaster, SQLITE_SYNC_NORMAL))
+ ){
+ sqlite3OsCloseFree(pMaster);
+ sqlite3OsDelete(pVfs, zMaster, 0);
+ sqlite3DbFree(db, zMaster);
+ return rc;
+ }
+
+ /* Sync all the db files involved in the transaction. The same call
+ ** sets the master journal pointer in each individual journal. If
+ ** an error occurs here, do not delete the master journal file.
+ **
+ ** If the error occurs during the first call to
+ ** sqlite3BtreeCommitPhaseOne(), then there is a chance that the
+ ** master journal file will be orphaned. But we cannot delete it,
+ ** in case the master journal file name was written into the journal
+ ** file before the failure occurred.
+ */
+ for(i=0; rc==SQLITE_OK && i<db->nDb; i++){
+ Btree *pBt = db->aDb[i].pBt;
+ if( pBt ){
+ rc = sqlite3BtreeCommitPhaseOne(pBt, zMaster);
+ }
+ }
+ sqlite3OsCloseFree(pMaster);
+ assert( rc!=SQLITE_BUSY );
+ if( rc!=SQLITE_OK ){
+ sqlite3DbFree(db, zMaster);
+ return rc;
+ }
+
+ /* Delete the master journal file. This commits the transaction. After
+ ** doing this the directory is synced again before any individual
+ ** transaction files are deleted.
+ */
+ rc = sqlite3OsDelete(pVfs, zMaster, 1);
+ sqlite3DbFree(db, zMaster);
+ zMaster = 0;
+ if( rc ){
+ return rc;
+ }
+
+ /* All files and directories have already been synced, so the following
+ ** calls to sqlite3BtreeCommitPhaseTwo() are only closing files and
+ ** deleting or truncating journals. If something goes wrong while
+ ** this is happening we don't really care. The integrity of the
+ ** transaction is already guaranteed, but some stray 'cold' journals
+ ** may be lying around. Returning an error code won't help matters.
+ */
+ disable_simulated_io_errors();
+ sqlite3BeginBenignMalloc();
+ for(i=0; i<db->nDb; i++){
+ Btree *pBt = db->aDb[i].pBt;
+ if( pBt ){
+ sqlite3BtreeCommitPhaseTwo(pBt, 1);
+ }
+ }
+ sqlite3EndBenignMalloc();
+ enable_simulated_io_errors();
+
+ sqlite3VtabCommit(db);
+ }
+#endif
+
+ return rc;
+}
+
+/*
+** This routine checks that the sqlite3.nVdbeActive count variable
+** matches the number of vdbe's in the list sqlite3.pVdbe that are
+** currently active. An assertion fails if the two counts do not match.
+** This is an internal self-check only - it is not an essential processing
+** step.
+**
+** This is a no-op if NDEBUG is defined.
+*/
+#ifndef NDEBUG
+static void checkActiveVdbeCnt(sqlite3 *db){
+ Vdbe *p;
+ int cnt = 0;
+ int nWrite = 0;
+ int nRead = 0;
+ p = db->pVdbe;
+ while( p ){
+ if( sqlite3_stmt_busy((sqlite3_stmt*)p) ){
+ cnt++;
+ if( p->readOnly==0 ) nWrite++;
+ if( p->bIsReader ) nRead++;
+ }
+ p = p->pNext;
+ }
+ assert( cnt==db->nVdbeActive );
+ assert( nWrite==db->nVdbeWrite );
+ assert( nRead==db->nVdbeRead );
+}
+#else
+#define checkActiveVdbeCnt(x)
+#endif
+
+/*
+** If the Vdbe passed as the first argument opened a statement-transaction,
+** close it now. Argument eOp must be either SAVEPOINT_ROLLBACK or
+** SAVEPOINT_RELEASE. If it is SAVEPOINT_ROLLBACK, then the statement
+** transaction is rolled back. If eOp is SAVEPOINT_RELEASE, then the
+** statement transaction is committed.
+**
+** If an IO error occurs, an SQLITE_IOERR_XXX error code is returned.
+** Otherwise SQLITE_OK.
+*/
+static SQLITE_NOINLINE int vdbeCloseStatement(Vdbe *p, int eOp){
+ sqlite3 *const db = p->db;
+ int rc = SQLITE_OK;
+ int i;
+ const int iSavepoint = p->iStatement-1;
+
+ assert( eOp==SAVEPOINT_ROLLBACK || eOp==SAVEPOINT_RELEASE);
+ assert( db->nStatement>0 );
+ assert( p->iStatement==(db->nStatement+db->nSavepoint) );
+
+ for(i=0; i<db->nDb; i++){
+ int rc2 = SQLITE_OK;
+ Btree *pBt = db->aDb[i].pBt;
+ if( pBt ){
+ if( eOp==SAVEPOINT_ROLLBACK ){
+ rc2 = sqlite3BtreeSavepoint(pBt, SAVEPOINT_ROLLBACK, iSavepoint);
+ }
+ if( rc2==SQLITE_OK ){
+ rc2 = sqlite3BtreeSavepoint(pBt, SAVEPOINT_RELEASE, iSavepoint);
+ }
+ if( rc==SQLITE_OK ){
+ rc = rc2;
+ }
+ }
+ }
+ db->nStatement--;
+ p->iStatement = 0;
+
+ if( rc==SQLITE_OK ){
+ if( eOp==SAVEPOINT_ROLLBACK ){
+ rc = sqlite3VtabSavepoint(db, SAVEPOINT_ROLLBACK, iSavepoint);
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3VtabSavepoint(db, SAVEPOINT_RELEASE, iSavepoint);
+ }
+ }
+
+  /* If the statement transaction is being rolled back, also restore the
+  ** database handle's deferred constraint counters to the values they had
+  ** when the statement transaction was opened. */
+ if( eOp==SAVEPOINT_ROLLBACK ){
+ db->nDeferredCons = p->nStmtDefCons;
+ db->nDeferredImmCons = p->nStmtDefImmCons;
+ }
+ return rc;
+}
+SQLITE_PRIVATE int sqlite3VdbeCloseStatement(Vdbe *p, int eOp){
+ if( p->db->nStatement && p->iStatement ){
+ return vdbeCloseStatement(p, eOp);
+ }
+ return SQLITE_OK;
+}
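+
+/*
+** For illustration, statement transactions are what allow a single failed
+** statement to be undone without abandoning the enclosing transaction.
+** Assuming db is an open connection with tables t1(a UNIQUE) and big
+** (both hypothetical), and with error checking omitted:
+**
+**     sqlite3_exec(db, "BEGIN", 0, 0, 0);
+**     sqlite3_exec(db, "INSERT INTO t1 VALUES(1)", 0, 0, 0);
+**     sqlite3_exec(db, "INSERT INTO t1 SELECT x FROM big", 0, 0, 0);
+**     sqlite3_exec(db, "COMMIT", 0, 0, 0);
+**
+** If the second INSERT hits a UNIQUE violation part way through, only
+** that statement is rolled back (via SAVEPOINT_ROLLBACK above); the first
+** row is still present and the COMMIT still succeeds.
+*/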
+
+
+/*
+** This function is called when a transaction opened by the database
+** handle associated with the VM passed as an argument is about to be
+** committed. If there are outstanding deferred foreign key constraint
+** violations, return SQLITE_ERROR. Otherwise, SQLITE_OK.
+**
+** If there are outstanding FK violations and this function returns
+** SQLITE_ERROR, set the result of the VM to SQLITE_CONSTRAINT_FOREIGNKEY
+** and write an error message to it. Then return SQLITE_ERROR.
+*/
+#ifndef SQLITE_OMIT_FOREIGN_KEY
+SQLITE_PRIVATE int sqlite3VdbeCheckFk(Vdbe *p, int deferred){
+ sqlite3 *db = p->db;
+ if( (deferred && (db->nDeferredCons+db->nDeferredImmCons)>0)
+ || (!deferred && p->nFkConstraint>0)
+ ){
+ p->rc = SQLITE_CONSTRAINT_FOREIGNKEY;
+ p->errorAction = OE_Abort;
+ sqlite3VdbeError(p, "FOREIGN KEY constraint failed");
+ return SQLITE_ERROR;
+ }
+ return SQLITE_OK;
+}
+#endif
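+
+/*
+** For illustration, a deferred foreign key violation is the case in which
+** the deferred==1 check above fails at COMMIT time rather than at the
+** statement that created the violation.  Assuming db is an open connection
+** (error checking omitted):
+**
+**     sqlite3_exec(db, "PRAGMA foreign_keys=ON", 0, 0, 0);
+**     sqlite3_exec(db,
+**        "CREATE TABLE parent(id INTEGER PRIMARY KEY);"
+**        "CREATE TABLE child(pid REFERENCES parent(id)"
+**        "                   DEFERRABLE INITIALLY DEFERRED);", 0, 0, 0);
+**     sqlite3_exec(db, "BEGIN", 0, 0, 0);
+**     sqlite3_exec(db, "INSERT INTO child VALUES(42)", 0, 0, 0);
+**     sqlite3_exec(db, "COMMIT", 0, 0, 0);
+**
+** The INSERT returns SQLITE_OK because the check is deferred, but the
+** final sqlite3_exec() returns SQLITE_CONSTRAINT and the error message
+** reads "FOREIGN KEY constraint failed".
+*/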
+
+/*
+** This routine is called when a VDBE tries to halt. If the VDBE
+** has made changes and is in autocommit mode, then commit those
+** changes. If a rollback is needed, then do the rollback.
+**
+** This routine is the only way to move the state of a VM from
+** VDBE_MAGIC_RUN to VDBE_MAGIC_HALT. It is harmless to
+** call this on a VM that is already in the VDBE_MAGIC_HALT state.
+**
+** Return an error code. If the commit could not complete because of
+** lock contention, return SQLITE_BUSY. If SQLITE_BUSY is returned, it
+** means the close did not happen and needs to be repeated.
+*/
+SQLITE_PRIVATE int sqlite3VdbeHalt(Vdbe *p){
+ int rc; /* Used to store transient return codes */
+ sqlite3 *db = p->db;
+
+ /* This function contains the logic that determines if a statement or
+ ** transaction will be committed or rolled back as a result of the
+ ** execution of this virtual machine.
+ **
+ ** If any of the following errors occur:
+ **
+ ** SQLITE_NOMEM
+ ** SQLITE_IOERR
+ ** SQLITE_FULL
+ ** SQLITE_INTERRUPT
+ **
+ ** Then the internal cache might have been left in an inconsistent
+ ** state. We need to rollback the statement transaction, if there is
+ ** one, or the complete transaction if there is no statement transaction.
+ */
+
+ if( db->mallocFailed ){
+ p->rc = SQLITE_NOMEM_BKPT;
+ }
+ closeAllCursors(p);
+ if( p->magic!=VDBE_MAGIC_RUN ){
+ return SQLITE_OK;
+ }
+ checkActiveVdbeCnt(db);
+
+ /* No commit or rollback needed if the program never started or if the
+ ** SQL statement does not read or write a database file. */
+ if( p->pc>=0 && p->bIsReader ){
+ int mrc; /* Primary error code from p->rc */
+ int eStatementOp = 0;
+ int isSpecialError; /* Set to true if a 'special' error */
+
+ /* Lock all btrees used by the statement */
+ sqlite3VdbeEnter(p);
+
+ /* Check for one of the special errors */
+ mrc = p->rc & 0xff;
+ isSpecialError = mrc==SQLITE_NOMEM || mrc==SQLITE_IOERR
+ || mrc==SQLITE_INTERRUPT || mrc==SQLITE_FULL;
+ if( isSpecialError ){
+ /* If the query was read-only and the error code is SQLITE_INTERRUPT,
+ ** no rollback is necessary. Otherwise, at least a savepoint
+ ** transaction must be rolled back to restore the database to a
+ ** consistent state.
+ **
+ ** Even if the statement is read-only, it is important to perform
+ ** a statement or transaction rollback operation. If the error
+ ** occurred while writing to the journal, sub-journal or database
+ ** file as part of an effort to free up cache space (see function
+ ** pagerStress() in pager.c), the rollback is required to restore
+ ** the pager to a consistent state.
+ */
+ if( !p->readOnly || mrc!=SQLITE_INTERRUPT ){
+ if( (mrc==SQLITE_NOMEM || mrc==SQLITE_FULL) && p->usesStmtJournal ){
+ eStatementOp = SAVEPOINT_ROLLBACK;
+ }else{
+ /* We are forced to roll back the active transaction. Before doing
+ ** so, abort any other statements this handle currently has active.
+ */
+ sqlite3RollbackAll(db, SQLITE_ABORT_ROLLBACK);
+ sqlite3CloseSavepoints(db);
+ db->autoCommit = 1;
+ p->nChange = 0;
+ }
+ }
+ }
+
+ /* Check for immediate foreign key violations. */
+ if( p->rc==SQLITE_OK ){
+ sqlite3VdbeCheckFk(p, 0);
+ }
+
+ /* If the auto-commit flag is set and this is the only active writer
+ ** VM, then we do either a commit or rollback of the current transaction.
+ **
+ ** Note: This block also runs if one of the special errors handled
+ ** above has occurred.
+ */
+ if( !sqlite3VtabInSync(db)
+ && db->autoCommit
+ && db->nVdbeWrite==(p->readOnly==0)
+ ){
+ if( p->rc==SQLITE_OK || (p->errorAction==OE_Fail && !isSpecialError) ){
+ rc = sqlite3VdbeCheckFk(p, 1);
+ if( rc!=SQLITE_OK ){
+ if( NEVER(p->readOnly) ){
+ sqlite3VdbeLeave(p);
+ return SQLITE_ERROR;
+ }
+ rc = SQLITE_CONSTRAINT_FOREIGNKEY;
+ }else{
+ /* The auto-commit flag is true, the vdbe program was successful
+ ** or hit an 'OR FAIL' constraint and there are no deferred foreign
+ ** key constraints to hold up the transaction. This means a commit
+ ** is required. */
+ rc = vdbeCommit(db, p);
+ }
+ if( rc==SQLITE_BUSY && p->readOnly ){
+ sqlite3VdbeLeave(p);
+ return SQLITE_BUSY;
+ }else if( rc!=SQLITE_OK ){
+ p->rc = rc;
+ sqlite3RollbackAll(db, SQLITE_OK);
+ p->nChange = 0;
+ }else{
+ db->nDeferredCons = 0;
+ db->nDeferredImmCons = 0;
+ db->flags &= ~SQLITE_DeferFKs;
+ sqlite3CommitInternalChanges(db);
+ }
+ }else{
+ sqlite3RollbackAll(db, SQLITE_OK);
+ p->nChange = 0;
+ }
+ db->nStatement = 0;
+ }else if( eStatementOp==0 ){
+ if( p->rc==SQLITE_OK || p->errorAction==OE_Fail ){
+ eStatementOp = SAVEPOINT_RELEASE;
+ }else if( p->errorAction==OE_Abort ){
+ eStatementOp = SAVEPOINT_ROLLBACK;
+ }else{
+ sqlite3RollbackAll(db, SQLITE_ABORT_ROLLBACK);
+ sqlite3CloseSavepoints(db);
+ db->autoCommit = 1;
+ p->nChange = 0;
+ }
+ }
+
+ /* If eStatementOp is non-zero, then a statement transaction needs to
+ ** be committed or rolled back. Call sqlite3VdbeCloseStatement() to
+ ** do so. If this operation returns an error, and the current statement
+ ** error code is SQLITE_OK or SQLITE_CONSTRAINT, then promote the
+ ** current statement error code.
+ */
+ if( eStatementOp ){
+ rc = sqlite3VdbeCloseStatement(p, eStatementOp);
+ if( rc ){
+ if( p->rc==SQLITE_OK || (p->rc&0xff)==SQLITE_CONSTRAINT ){
+ p->rc = rc;
+ sqlite3DbFree(db, p->zErrMsg);
+ p->zErrMsg = 0;
+ }
+ sqlite3RollbackAll(db, SQLITE_ABORT_ROLLBACK);
+ sqlite3CloseSavepoints(db);
+ db->autoCommit = 1;
+ p->nChange = 0;
+ }
+ }
+
+ /* If this was an INSERT, UPDATE or DELETE and no statement transaction
+ ** has been rolled back, update the database connection change-counter.
+ */
+ if( p->changeCntOn ){
+ if( eStatementOp!=SAVEPOINT_ROLLBACK ){
+ sqlite3VdbeSetChanges(db, p->nChange);
+ }else{
+ sqlite3VdbeSetChanges(db, 0);
+ }
+ p->nChange = 0;
+ }
+
+ /* Release the locks */
+ sqlite3VdbeLeave(p);
+ }
+
+ /* We have successfully halted and closed the VM. Record this fact. */
+ if( p->pc>=0 ){
+ db->nVdbeActive--;
+ if( !p->readOnly ) db->nVdbeWrite--;
+ if( p->bIsReader ) db->nVdbeRead--;
+ assert( db->nVdbeActive>=db->nVdbeRead );
+ assert( db->nVdbeRead>=db->nVdbeWrite );
+ assert( db->nVdbeWrite>=0 );
+ }
+ p->magic = VDBE_MAGIC_HALT;
+ checkActiveVdbeCnt(db);
+ if( db->mallocFailed ){
+ p->rc = SQLITE_NOMEM_BKPT;
+ }
+
+ /* If the auto-commit flag is set to true, then any locks that were held
+ ** by connection db have now been released. Call sqlite3ConnectionUnlocked()
+ ** to invoke any required unlock-notify callbacks.
+ */
+ if( db->autoCommit ){
+ sqlite3ConnectionUnlocked(db);
+ }
+
+ assert( db->nVdbeActive>0 || db->autoCommit==0 || db->nStatement==0 );
+ return (p->rc==SQLITE_BUSY ? SQLITE_BUSY : SQLITE_OK);
+}
+
+
+/*
+** Each VDBE holds the result of the most recent sqlite3_step() call
+** in p->rc. This routine sets that result back to SQLITE_OK.
+*/
+SQLITE_PRIVATE void sqlite3VdbeResetStepResult(Vdbe *p){
+ p->rc = SQLITE_OK;
+}
+
+/*
+** Copy the error code and error message belonging to the VDBE passed
+** as the first argument to its database handle (so that they will be
+** returned by calls to sqlite3_errcode() and sqlite3_errmsg()).
+**
+** This function does not clear the VDBE error code or message, just
+** copies them to the database handle.
+*/
+SQLITE_PRIVATE int sqlite3VdbeTransferError(Vdbe *p){
+ sqlite3 *db = p->db;
+ int rc = p->rc;
+ if( p->zErrMsg ){
+ db->bBenignMalloc++;
+ sqlite3BeginBenignMalloc();
+ if( db->pErr==0 ) db->pErr = sqlite3ValueNew(db);
+ sqlite3ValueSetStr(db->pErr, -1, p->zErrMsg, SQLITE_UTF8, SQLITE_TRANSIENT);
+ sqlite3EndBenignMalloc();
+ db->bBenignMalloc--;
+ db->errCode = rc;
+ }else{
+ sqlite3Error(db, rc);
+ }
+ return rc;
+}
+
+#ifdef SQLITE_ENABLE_SQLLOG
+/*
+** If an SQLITE_CONFIG_SQLLOG hook is registered and the VM has been run,
+** invoke it.
+*/
+static void vdbeInvokeSqllog(Vdbe *v){
+ if( sqlite3GlobalConfig.xSqllog && v->rc==SQLITE_OK && v->zSql && v->pc>=0 ){
+ char *zExpanded = sqlite3VdbeExpandSql(v, v->zSql);
+ assert( v->db->init.busy==0 );
+ if( zExpanded ){
+ sqlite3GlobalConfig.xSqllog(
+ sqlite3GlobalConfig.pSqllogArg, v->db, zExpanded, 1
+ );
+ sqlite3DbFree(v->db, zExpanded);
+ }
+ }
+}
+#else
+# define vdbeInvokeSqllog(x)
+#endif
+
+/*
+** Clean up a VDBE after execution but do not delete the VDBE just yet.
+** Transfer any error message into the database handle and return the
+** result code.
+**
+** After this routine is run, the VDBE should be ready to be executed
+** again.
+**
+** To look at it another way, this routine resets the state of the
+** virtual machine from VDBE_MAGIC_RUN or VDBE_MAGIC_HALT back to
+** VDBE_MAGIC_INIT.
+*/
+SQLITE_PRIVATE int sqlite3VdbeReset(Vdbe *p){
+ sqlite3 *db;
+ db = p->db;
+
+ /* If the VM did not run to completion or if it encountered an
+ ** error, then it might not have been halted properly. So halt
+ ** it now.
+ */
+ sqlite3VdbeHalt(p);
+
+  /* If the VDBE has been run even partially, then transfer the error code
+ ** and error message from the VDBE into the main database structure. But
+ ** if the VDBE has just been set to run but has not actually executed any
+ ** instructions yet, leave the main database error information unchanged.
+ */
+ if( p->pc>=0 ){
+ vdbeInvokeSqllog(p);
+ sqlite3VdbeTransferError(p);
+ sqlite3DbFree(db, p->zErrMsg);
+ p->zErrMsg = 0;
+ if( p->runOnlyOnce ) p->expired = 1;
+ }else if( p->rc && p->expired ){
+ /* The expired flag was set on the VDBE before the first call
+ ** to sqlite3_step(). For consistency (since sqlite3_step() was
+ ** called), set the database error in this case as well.
+ */
+ sqlite3ErrorWithMsg(db, p->rc, p->zErrMsg ? "%s" : 0, p->zErrMsg);
+ sqlite3DbFree(db, p->zErrMsg);
+ p->zErrMsg = 0;
+ }
+
+ /* Reclaim all memory used by the VDBE
+ */
+ Cleanup(p);
+
+ /* Save profiling information from this VDBE run.
+ */
+#ifdef VDBE_PROFILE
+ {
+ FILE *out = fopen("vdbe_profile.out", "a");
+ if( out ){
+ int i;
+ fprintf(out, "---- ");
+ for(i=0; i<p->nOp; i++){
+ fprintf(out, "%02x", p->aOp[i].opcode);
+ }
+ fprintf(out, "\n");
+ if( p->zSql ){
+ char c, pc = 0;
+ fprintf(out, "-- ");
+ for(i=0; (c = p->zSql[i])!=0; i++){
+ if( pc=='\n' ) fprintf(out, "-- ");
+ putc(c, out);
+ pc = c;
+ }
+ if( pc!='\n' ) fprintf(out, "\n");
+ }
+ for(i=0; i<p->nOp; i++){
+ char zHdr[100];
+ sqlite3_snprintf(sizeof(zHdr), zHdr, "%6u %12llu %8llu ",
+ p->aOp[i].cnt,
+ p->aOp[i].cycles,
+ p->aOp[i].cnt>0 ? p->aOp[i].cycles/p->aOp[i].cnt : 0
+ );
+ fprintf(out, "%s", zHdr);
+ sqlite3VdbePrintOp(out, i, &p->aOp[i]);
+ }
+ fclose(out);
+ }
+ }
+#endif
+ p->iCurrentTime = 0;
+ p->magic = VDBE_MAGIC_RESET;
+ return p->rc & db->errMask;
+}
+
+/*
+** Clean up and delete a VDBE after execution. Return an integer which is
+** the result code. Any error message text is transferred to the database
+** handle.
+*/
+SQLITE_PRIVATE int sqlite3VdbeFinalize(Vdbe *p){
+ int rc = SQLITE_OK;
+ if( p->magic==VDBE_MAGIC_RUN || p->magic==VDBE_MAGIC_HALT ){
+ rc = sqlite3VdbeReset(p);
+ assert( (rc & p->db->errMask)==rc );
+ }
+ sqlite3VdbeDelete(p);
+ return rc;
+}
+
+/*
+** If parameter iOp is less than zero, then invoke the destructor for
+** all auxiliary data pointers currently cached by the VM passed as
+** the first argument.
+**
+** Or, if iOp is greater than or equal to zero, then the destructor is
+** only invoked for those auxiliary data pointers created by the user
+** function invoked by the OP_Function opcode at instruction iOp of
+** VM pVdbe, and only then if:
+**
+** * the associated function parameter is the 32nd or later (counting
+** from left to right), or
+**
+** * the corresponding bit in argument mask is clear (where the first
+** function parameter corresponds to bit 0 etc.).
+*/
+SQLITE_PRIVATE void sqlite3VdbeDeleteAuxData(sqlite3 *db, AuxData **pp, int iOp, int mask){
+ while( *pp ){
+ AuxData *pAux = *pp;
+ if( (iOp<0)
+ || (pAux->iOp==iOp && (pAux->iArg>31 || !(mask & MASKBIT32(pAux->iArg))))
+ ){
+ testcase( pAux->iArg==31 );
+ if( pAux->xDelete ){
+ pAux->xDelete(pAux->pAux);
+ }
+ *pp = pAux->pNext;
+ sqlite3DbFree(db, pAux);
+ }else{
+ pp= &pAux->pNext;
+ }
+ }
+}
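+
+/*
+** The auxiliary data deleted above is the data attached by
+** application-defined SQL functions through sqlite3_set_auxdata() and
+** sqlite3_get_auxdata().  A typical (illustrative) pattern caches an
+** expensive-to-compute object keyed on a constant argument; the
+** CompiledPattern type and the compilePattern(), patternMatches() and
+** freePattern() helpers below are hypothetical:
+**
+**     static void regexFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
+**       CompiledPattern *p = sqlite3_get_auxdata(ctx, 0);
+**       if( p==0 ){
+**         p = compilePattern((const char*)sqlite3_value_text(argv[0]));
+**         if( p==0 ){ sqlite3_result_error_nomem(ctx); return; }
+**         sqlite3_set_auxdata(ctx, 0, p, freePattern);
+**       }
+**       sqlite3_result_int(ctx, patternMatches(p, argv[1]));
+**     }
+*/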
+
+/*
+** Free all memory associated with the Vdbe passed as the second argument,
+** except for the object itself, which is preserved.
+**
+** The difference between this function and sqlite3VdbeDelete() is that
+** VdbeDelete() also unlinks the Vdbe from the list of VMs associated with
+** the database connection and frees the object itself.
+*/
+SQLITE_PRIVATE void sqlite3VdbeClearObject(sqlite3 *db, Vdbe *p){
+ SubProgram *pSub, *pNext;
+ assert( p->db==0 || p->db==db );
+ releaseMemArray(p->aColName, p->nResColumn*COLNAME_N);
+ for(pSub=p->pProgram; pSub; pSub=pNext){
+ pNext = pSub->pNext;
+ vdbeFreeOpArray(db, pSub->aOp, pSub->nOp);
+ sqlite3DbFree(db, pSub);
+ }
+ if( p->magic!=VDBE_MAGIC_INIT ){
+ releaseMemArray(p->aVar, p->nVar);
+ sqlite3DbFree(db, p->pVList);
+ sqlite3DbFree(db, p->pFree);
+ }
+ vdbeFreeOpArray(db, p->aOp, p->nOp);
+ sqlite3DbFree(db, p->aColName);
+ sqlite3DbFree(db, p->zSql);
+#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
+ {
+ int i;
+ for(i=0; i<p->nScan; i++){
+ sqlite3DbFree(db, p->aScan[i].zName);
+ }
+ sqlite3DbFree(db, p->aScan);
+ }
+#endif
+}
+
+/*
+** Delete an entire VDBE.
+*/
+SQLITE_PRIVATE void sqlite3VdbeDelete(Vdbe *p){
+ sqlite3 *db;
+
+ if( NEVER(p==0) ) return;
+ db = p->db;
+ assert( sqlite3_mutex_held(db->mutex) );
+ sqlite3VdbeClearObject(db, p);
+ if( p->pPrev ){
+ p->pPrev->pNext = p->pNext;
+ }else{
+ assert( db->pVdbe==p );
+ db->pVdbe = p->pNext;
+ }
+ if( p->pNext ){
+ p->pNext->pPrev = p->pPrev;
+ }
+ p->magic = VDBE_MAGIC_DEAD;
+ p->db = 0;
+ sqlite3DbFree(db, p);
+}
+
+/*
+** The cursor "p" has a pending seek operation that has not yet been
+** carried out. Seek the cursor now. If an error occurs, return
+** the appropriate error code.
+*/
+static int SQLITE_NOINLINE handleDeferredMoveto(VdbeCursor *p){
+ int res, rc;
+#ifdef SQLITE_TEST
+ extern int sqlite3_search_count;
+#endif
+ assert( p->deferredMoveto );
+ assert( p->isTable );
+ assert( p->eCurType==CURTYPE_BTREE );
+ rc = sqlite3BtreeMovetoUnpacked(p->uc.pCursor, 0, p->movetoTarget, 0, &res);
+ if( rc ) return rc;
+ if( res!=0 ) return SQLITE_CORRUPT_BKPT;
+#ifdef SQLITE_TEST
+ sqlite3_search_count++;
+#endif
+ p->deferredMoveto = 0;
+ p->cacheStatus = CACHE_STALE;
+ return SQLITE_OK;
+}
+
+/*
+** Something has moved cursor "p" out of place. Maybe the row it was
+** pointing to was deleted out from under it. Or maybe the btree was
+** rebalanced. Whatever the cause, try to restore "p" to the place it
+** is supposed to be pointing. If the row was deleted out from under the
+** cursor, set the cursor to point to a NULL row.
+*/
+static int SQLITE_NOINLINE handleMovedCursor(VdbeCursor *p){
+ int isDifferentRow, rc;
+ assert( p->eCurType==CURTYPE_BTREE );
+ assert( p->uc.pCursor!=0 );
+ assert( sqlite3BtreeCursorHasMoved(p->uc.pCursor) );
+ rc = sqlite3BtreeCursorRestore(p->uc.pCursor, &isDifferentRow);
+ p->cacheStatus = CACHE_STALE;
+ if( isDifferentRow ) p->nullRow = 1;
+ return rc;
+}
+
+/*
+** Check to ensure that the cursor is valid. Restore the cursor
+** if need be. Return any I/O error from the restore operation.
+*/
+SQLITE_PRIVATE int sqlite3VdbeCursorRestore(VdbeCursor *p){
+ assert( p->eCurType==CURTYPE_BTREE );
+ if( sqlite3BtreeCursorHasMoved(p->uc.pCursor) ){
+ return handleMovedCursor(p);
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Make sure the cursor p is ready to read or write the row to which it
+** was last positioned. Return an error code if an OOM fault or I/O error
+** prevents us from positioning the cursor to its correct position.
+**
+** If a MoveTo operation is pending on the given cursor, then do that
+** MoveTo now. If no move is pending, check to see if the row has been
+** deleted out from under the cursor and if it has, mark the row as
+** a NULL row.
+**
+** If the cursor is already pointing to the correct row and that row has
+** not been deleted out from under the cursor, then this routine is a no-op.
+*/
+SQLITE_PRIVATE int sqlite3VdbeCursorMoveto(VdbeCursor **pp, int *piCol){
+ VdbeCursor *p = *pp;
+ if( p->eCurType==CURTYPE_BTREE ){
+ if( p->deferredMoveto ){
+ int iMap;
+ if( p->aAltMap && (iMap = p->aAltMap[1+*piCol])>0 ){
+ *pp = p->pAltCursor;
+ *piCol = iMap - 1;
+ return SQLITE_OK;
+ }
+ return handleDeferredMoveto(p);
+ }
+ if( sqlite3BtreeCursorHasMoved(p->uc.pCursor) ){
+ return handleMovedCursor(p);
+ }
+ }
+ return SQLITE_OK;
+}
+
+/*
+** The following functions:
+**
+** sqlite3VdbeSerialType()
+** sqlite3VdbeSerialTypeLen()
+** sqlite3VdbeSerialLen()
+** sqlite3VdbeSerialPut()
+** sqlite3VdbeSerialGet()
+**
+** encapsulate the code that serializes values for storage in SQLite
+** data and index records. Each serialized value consists of a
+** 'serial-type' and a blob of data. The serial type is an 8-byte unsigned
+** integer, stored as a varint.
+**
+** In an SQLite index record, the serial type is stored directly before
+** the blob of data that it corresponds to. In a table record, all serial
+** types are stored at the start of the record, and the blobs of data at
+** the end. Hence these functions allow the caller to handle the
+** serial-type and data blob separately.
+**
+** The following table describes the various storage classes for data:
+**
+** serial type bytes of data type
+** -------------- --------------- ---------------
+** 0 0 NULL
+** 1 1 signed integer
+** 2 2 signed integer
+** 3 3 signed integer
+** 4 4 signed integer
+** 5 6 signed integer
+** 6 8 signed integer
+** 7 8 IEEE float
+** 8 0 Integer constant 0
+** 9 0 Integer constant 1
+** 10,11 reserved for expansion
+** N>=12 and even (N-12)/2 BLOB
+** N>=13 and odd (N-13)/2 text
+**
+** The 8 and 9 types were added in 3.3.0, file format 4. Prior versions
+** of SQLite will not understand those serial types.
+*/
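+
+/*
+** A few worked examples of the encoding above (illustrative only):
+**
+**     value stored                     serial type   bytes of data
+**     ------------------------------   -----------   -------------
+**     NULL                                   0              0
+**     integer 0 (file format >= 4)           8              0
+**     integer 1000                           2              2
+**     real 3.14                              7              8
+**     5-byte BLOB x'0102030405'             22              5
+**     5-character text 'hello'              23              5
+**
+** The last two rows follow from N = 2*5+12 = 22 for the BLOB and
+** N = 2*5+13 = 23 for the text value.
+*/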
+
+/*
+** Return the serial-type for the value stored in pMem.
+*/
+SQLITE_PRIVATE u32 sqlite3VdbeSerialType(Mem *pMem, int file_format, u32 *pLen){
+ int flags = pMem->flags;
+ u32 n;
+
+ assert( pLen!=0 );
+ if( flags&MEM_Null ){
+ *pLen = 0;
+ return 0;
+ }
+ if( flags&MEM_Int ){
+ /* Figure out whether to use 1, 2, 4, 6 or 8 bytes. */
+# define MAX_6BYTE ((((i64)0x00008000)<<32)-1)
+ i64 i = pMem->u.i;
+ u64 u;
+ if( i<0 ){
+ u = ~i;
+ }else{
+ u = i;
+ }
+ if( u<=127 ){
+ if( (i&1)==i && file_format>=4 ){
+ *pLen = 0;
+ return 8+(u32)u;
+ }else{
+ *pLen = 1;
+ return 1;
+ }
+ }
+ if( u<=32767 ){ *pLen = 2; return 2; }
+ if( u<=8388607 ){ *pLen = 3; return 3; }
+ if( u<=2147483647 ){ *pLen = 4; return 4; }
+ if( u<=MAX_6BYTE ){ *pLen = 6; return 5; }
+ *pLen = 8;
+ return 6;
+ }
+ if( flags&MEM_Real ){
+ *pLen = 8;
+ return 7;
+ }
+ assert( pMem->db->mallocFailed || flags&(MEM_Str|MEM_Blob) );
+ assert( pMem->n>=0 );
+ n = (u32)pMem->n;
+ if( flags & MEM_Zero ){
+ n += pMem->u.nZero;
+ }
+ *pLen = n;
+ return ((n*2) + 12 + ((flags&MEM_Str)!=0));
+}
+
+/*
+** The sizes for serial types less than 128
+*/
+static const u8 sqlite3SmallTypeSizes[] = {
+ /* 0 1 2 3 4 5 6 7 8 9 */
+/* 0 */ 0, 1, 2, 3, 4, 6, 8, 8, 0, 0,
+/* 10 */ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
+/* 20 */ 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+/* 30 */ 9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
+/* 40 */ 14, 14, 15, 15, 16, 16, 17, 17, 18, 18,
+/* 50 */ 19, 19, 20, 20, 21, 21, 22, 22, 23, 23,
+/* 60 */ 24, 24, 25, 25, 26, 26, 27, 27, 28, 28,
+/* 70 */ 29, 29, 30, 30, 31, 31, 32, 32, 33, 33,
+/* 80 */ 34, 34, 35, 35, 36, 36, 37, 37, 38, 38,
+/* 90 */ 39, 39, 40, 40, 41, 41, 42, 42, 43, 43,
+/* 100 */ 44, 44, 45, 45, 46, 46, 47, 47, 48, 48,
+/* 110 */ 49, 49, 50, 50, 51, 51, 52, 52, 53, 53,
+/* 120 */ 54, 54, 55, 55, 56, 56, 57, 57
+};
+
+/*
+** Return the length of the data corresponding to the supplied serial-type.
+*/
+SQLITE_PRIVATE u32 sqlite3VdbeSerialTypeLen(u32 serial_type){
+ if( serial_type>=128 ){
+ return (serial_type-12)/2;
+ }else{
+ assert( serial_type<12
+ || sqlite3SmallTypeSizes[serial_type]==(serial_type - 12)/2 );
+ return sqlite3SmallTypeSizes[serial_type];
+ }
+}
+SQLITE_PRIVATE u8 sqlite3VdbeOneByteSerialTypeLen(u8 serial_type){
+ assert( serial_type<128 );
+ return sqlite3SmallTypeSizes[serial_type];
+}
+
+/*
+** If we are on an architecture with mixed-endian floating
+** points (ex: ARM7) then swap the lower 4 bytes with the
+** upper 4 bytes. Return the result.
+**
+** For most architectures, this is a no-op.
+**
+** (later): It is reported to me that the mixed-endian problem
+** on ARM7 is an issue with GCC, not with the ARM7 chip. It seems
+** that early versions of GCC stored the two words of a 64-bit
+** float in the wrong order. And that error has been propagated
+** ever since. The blame is not necessarily with GCC, though.
+** GCC might just have been copying the problem from a prior compiler.
+** I am also told that newer versions of GCC that follow a different
+** ABI get the byte order right.
+**
+** Developers using SQLite on an ARM7 should compile and run their
+** application using -DSQLITE_DEBUG=1 at least once. With DEBUG
+** enabled, some asserts below will ensure that the byte order of
+** floating point values is correct.
+**
+** (2007-08-30) Frank van Vugt has studied this problem closely
+** and has sent his findings to the SQLite developers. Frank
+** writes that some Linux kernels offer floating point hardware
+** emulation that uses only 32-bit mantissas instead of the full
+** 48 bits required by the IEEE standard. (This is the
+** CONFIG_FPE_FASTFPE option.) On such systems, floating point
+** byte swapping becomes very complicated. To avoid problems,
+** the necessary byte swapping is carried out using a 64-bit integer
+** rather than a 64-bit float. Frank assures us that the code here
+** works for him. We, the developers, have no way to independently
+** verify this, but Frank seems to know what he is talking about
+** so we trust him.
+*/
+#ifdef SQLITE_MIXED_ENDIAN_64BIT_FLOAT
+static u64 floatSwap(u64 in){
+ union {
+ u64 r;
+ u32 i[2];
+ } u;
+ u32 t;
+
+ u.r = in;
+ t = u.i[0];
+ u.i[0] = u.i[1];
+ u.i[1] = t;
+ return u.r;
+}
+# define swapMixedEndianFloat(X) X = floatSwap(X)
+#else
+# define swapMixedEndianFloat(X)
+#endif
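+
+/*
+** A concrete illustration of the issue handled above: the IEEE 754 double
+** 1.0 has the 64-bit pattern 0x3FF0000000000000.  A conventional
+** little-endian target stores it as the byte sequence
+** 00 00 00 00 00 00 F0 3F, while the mixed-endian layout described above
+** keeps the two 32-bit halves swapped: 00 00 F0 3F 00 00 00 00.
+** floatSwap() exchanges those two halves so that the big-endian on-disk
+** format produced by sqlite3VdbeSerialPut() comes out the same on every
+** platform.
+*/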
+
+/*
+** Write the serialized data blob for the value stored in pMem into
+** buf. It is assumed that the caller has allocated sufficient space.
+** Return the number of bytes written.
+**
+** The caller is responsible for allocating enough space in buf[] to hold
+** the entire field, exclusive of the pMem->u.nZero bytes for a MEM_Zero
+** value.
+**
+** Return the number of bytes actually written into buf[]. The number
+** of bytes in the zero-filled tail is included in the return value only
+** if those bytes were zeroed in buf[].
+*/
+SQLITE_PRIVATE u32 sqlite3VdbeSerialPut(u8 *buf, Mem *pMem, u32 serial_type){
+ u32 len;
+
+ /* Integer and Real */
+ if( serial_type<=7 && serial_type>0 ){
+ u64 v;
+ u32 i;
+ if( serial_type==7 ){
+ assert( sizeof(v)==sizeof(pMem->u.r) );
+ memcpy(&v, &pMem->u.r, sizeof(v));
+ swapMixedEndianFloat(v);
+ }else{
+ v = pMem->u.i;
+ }
+ len = i = sqlite3SmallTypeSizes[serial_type];
+ assert( i>0 );
+ do{
+ buf[--i] = (u8)(v&0xFF);
+ v >>= 8;
+ }while( i );
+ return len;
+ }
+
+ /* String or blob */
+ if( serial_type>=12 ){
+ assert( pMem->n + ((pMem->flags & MEM_Zero)?pMem->u.nZero:0)
+ == (int)sqlite3VdbeSerialTypeLen(serial_type) );
+ len = pMem->n;
+ if( len>0 ) memcpy(buf, pMem->z, len);
+ return len;
+ }
+
+ /* NULL or constants 0 or 1 */
+ return 0;
+}
+
+/* Input "x" is a sequence of unsigned characters that represent a
+** big-endian integer. Return the equivalent native integer.
+*/
+#define ONE_BYTE_INT(x) ((i8)(x)[0])
+#define TWO_BYTE_INT(x) (256*(i8)((x)[0])|(x)[1])
+#define THREE_BYTE_INT(x) (65536*(i8)((x)[0])|((x)[1]<<8)|(x)[2])
+#define FOUR_BYTE_UINT(x) (((u32)(x)[0]<<24)|((x)[1]<<16)|((x)[2]<<8)|(x)[3])
+#define FOUR_BYTE_INT(x) (16777216*(i8)((x)[0])|((x)[1]<<16)|((x)[2]<<8)|(x)[3])
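+
+/*
+** A worked example of the macros above (illustrative only): for the
+** two-byte big-endian buffer {0xFF, 0xFE}, TWO_BYTE_INT() sign-extends
+** 0xFF to -1, multiplies by 256 to get -256 (0xFFFFFF00), then ORs in
+** 0xFE, yielding -2.  The unsigned FOUR_BYTE_UINT() applied to
+** {0x00, 0x00, 0x01, 0x00} simply yields 256.
+*/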
+
+/*
+** Deserialize the data blob pointed to by buf as serial type serial_type
+** and store the result in pMem. Return the number of bytes read.
+**
+** This function is implemented as two separate routines for performance.
+** The few cases that require local variables are broken out into a separate
+** routine so that in most cases the overhead of moving the stack pointer
+** is avoided.
+*/
+static u32 SQLITE_NOINLINE serialGet(
+ const unsigned char *buf, /* Buffer to deserialize from */
+ u32 serial_type, /* Serial type to deserialize */
+ Mem *pMem /* Memory cell to write value into */
+){
+ u64 x = FOUR_BYTE_UINT(buf);
+ u32 y = FOUR_BYTE_UINT(buf+4);
+ x = (x<<32) + y;
+ if( serial_type==6 ){
+ /* EVIDENCE-OF: R-29851-52272 Value is a big-endian 64-bit
+ ** twos-complement integer. */
+ pMem->u.i = *(i64*)&x;
+ pMem->flags = MEM_Int;
+ testcase( pMem->u.i<0 );
+ }else{
+ /* EVIDENCE-OF: R-57343-49114 Value is a big-endian IEEE 754-2008 64-bit
+ ** floating point number. */
+#if !defined(NDEBUG) && !defined(SQLITE_OMIT_FLOATING_POINT)
+ /* Verify that integers and floating point values use the same
+ ** byte order. Or, that if SQLITE_MIXED_ENDIAN_64BIT_FLOAT is
+ ** defined that 64-bit floating point values really are mixed
+ ** endian.
+ */
+ static const u64 t1 = ((u64)0x3ff00000)<<32;
+ static const double r1 = 1.0;
+ u64 t2 = t1;
+ swapMixedEndianFloat(t2);
+ assert( sizeof(r1)==sizeof(t2) && memcmp(&r1, &t2, sizeof(r1))==0 );
+#endif
+ assert( sizeof(x)==8 && sizeof(pMem->u.r)==8 );
+ swapMixedEndianFloat(x);
+ memcpy(&pMem->u.r, &x, sizeof(x));
+ pMem->flags = sqlite3IsNaN(pMem->u.r) ? MEM_Null : MEM_Real;
+ }
+ return 8;
+}
+SQLITE_PRIVATE u32 sqlite3VdbeSerialGet(
+ const unsigned char *buf, /* Buffer to deserialize from */
+ u32 serial_type, /* Serial type to deserialize */
+ Mem *pMem /* Memory cell to write value into */
+){
+ switch( serial_type ){
+ case 10: /* Reserved for future use */
+ case 11: /* Reserved for future use */
+ case 0: { /* Null */
+ /* EVIDENCE-OF: R-24078-09375 Value is a NULL. */
+ pMem->flags = MEM_Null;
+ break;
+ }
+ case 1: {
+ /* EVIDENCE-OF: R-44885-25196 Value is an 8-bit twos-complement
+ ** integer. */
+ pMem->u.i = ONE_BYTE_INT(buf);
+ pMem->flags = MEM_Int;
+ testcase( pMem->u.i<0 );
+ return 1;
+ }
+ case 2: { /* 2-byte signed integer */
+ /* EVIDENCE-OF: R-49794-35026 Value is a big-endian 16-bit
+ ** twos-complement integer. */
+ pMem->u.i = TWO_BYTE_INT(buf);
+ pMem->flags = MEM_Int;
+ testcase( pMem->u.i<0 );
+ return 2;
+ }
+ case 3: { /* 3-byte signed integer */
+ /* EVIDENCE-OF: R-37839-54301 Value is a big-endian 24-bit
+ ** twos-complement integer. */
+ pMem->u.i = THREE_BYTE_INT(buf);
+ pMem->flags = MEM_Int;
+ testcase( pMem->u.i<0 );
+ return 3;
+ }
+ case 4: { /* 4-byte signed integer */
+ /* EVIDENCE-OF: R-01849-26079 Value is a big-endian 32-bit
+ ** twos-complement integer. */
+ pMem->u.i = FOUR_BYTE_INT(buf);
+#ifdef __HP_cc
+ /* Work around a sign-extension bug in the HP compiler for HP/UX */
+ if( buf[0]&0x80 ) pMem->u.i |= 0xffffffff80000000LL;
+#endif
+ pMem->flags = MEM_Int;
+ testcase( pMem->u.i<0 );
+ return 4;
+ }
+ case 5: { /* 6-byte signed integer */
+ /* EVIDENCE-OF: R-50385-09674 Value is a big-endian 48-bit
+ ** twos-complement integer. */
+ pMem->u.i = FOUR_BYTE_UINT(buf+2) + (((i64)1)<<32)*TWO_BYTE_INT(buf);
+ pMem->flags = MEM_Int;
+ testcase( pMem->u.i<0 );
+ return 6;
+ }
+ case 6: /* 8-byte signed integer */
+ case 7: { /* IEEE floating point */
+ /* These use local variables, so do them in a separate routine
+ ** to avoid having to move the frame pointer in the common case */
+ return serialGet(buf,serial_type,pMem);
+ }
+ case 8: /* Integer 0 */
+ case 9: { /* Integer 1 */
+ /* EVIDENCE-OF: R-12976-22893 Value is the integer 0. */
+ /* EVIDENCE-OF: R-18143-12121 Value is the integer 1. */
+ pMem->u.i = serial_type-8;
+ pMem->flags = MEM_Int;
+ return 0;
+ }
+ default: {
+ /* EVIDENCE-OF: R-14606-31564 Value is a BLOB that is (N-12)/2 bytes in
+ ** length.
+ ** EVIDENCE-OF: R-28401-00140 Value is a string in the text encoding and
+ ** (N-13)/2 bytes in length. */
+ static const u16 aFlag[] = { MEM_Blob|MEM_Ephem, MEM_Str|MEM_Ephem };
+ pMem->z = (char *)buf;
+ pMem->n = (serial_type-12)/2;
+ pMem->flags = aFlag[serial_type&1];
+ return pMem->n;
+ }
+ }
+ return 0;
+}
+/*
+** This routine allocates an UnpackedRecord structure large enough to be
+** used with sqlite3VdbeRecordUnpack() for the KeyInfo structure passed as
+** its only argument.
+**
+** The space is allocated using sqlite3DbMallocRaw() and must eventually be
+** freed by the caller using sqlite3DbFree().
+**
+** If an OOM error occurs, NULL is returned.
+*/
+SQLITE_PRIVATE UnpackedRecord *sqlite3VdbeAllocUnpackedRecord(
+ KeyInfo *pKeyInfo /* Description of the record */
+){
+ UnpackedRecord *p; /* Unpacked record to return */
+ int nByte; /* Number of bytes required for *p */
+ nByte = ROUND8(sizeof(UnpackedRecord)) + sizeof(Mem)*(pKeyInfo->nField+1);
+ p = (UnpackedRecord *)sqlite3DbMallocRaw(pKeyInfo->db, nByte);
+ if( !p ) return 0;
+ p->aMem = (Mem*)&((char*)p)[ROUND8(sizeof(UnpackedRecord))];
+ assert( pKeyInfo->aSortOrder!=0 );
+ p->pKeyInfo = pKeyInfo;
+ p->nField = pKeyInfo->nField + 1;
+ return p;
+}
+
+/*
+** Given the nKey-byte encoding of a record in pKey[], populate the
+** UnpackedRecord structure indicated by the fourth argument with the
+** contents of the decoded record.
+*/
+SQLITE_PRIVATE void sqlite3VdbeRecordUnpack(
+ KeyInfo *pKeyInfo, /* Information about the record format */
+ int nKey, /* Size of the binary record */
+ const void *pKey, /* The binary record */
+ UnpackedRecord *p /* Populate this structure before returning. */
+){
+ const unsigned char *aKey = (const unsigned char *)pKey;
+ int d;
+ u32 idx; /* Offset in aKey[] to read from */
+ u16 u; /* Unsigned loop counter */
+ u32 szHdr;
+ Mem *pMem = p->aMem;
+
+ p->default_rc = 0;
+ assert( EIGHT_BYTE_ALIGNMENT(pMem) );
+ idx = getVarint32(aKey, szHdr);
+ d = szHdr;
+ u = 0;
+ while( idx<szHdr && d<=nKey ){
+ u32 serial_type;
+
+ idx += getVarint32(&aKey[idx], serial_type);
+ pMem->enc = pKeyInfo->enc;
+ pMem->db = pKeyInfo->db;
+ /* pMem->flags = 0; // sqlite3VdbeSerialGet() will set this for us */
+ pMem->szMalloc = 0;
+ pMem->z = 0;
+ d += sqlite3VdbeSerialGet(&aKey[d], serial_type, pMem);
+ pMem++;
+ if( (++u)>=p->nField ) break;
+ }
+ assert( u<=pKeyInfo->nField + 1 );
+ p->nField = u;
+}
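+
+/* Illustrative sketch (not part of the SQLite sources; compile-time
+** disabled).  A two-column record holding the integer 7 followed by the
+** two-byte text 'hi' is encoded as:
+**
+**     header:  03 01 11    (header size 3, then serial types 1 and 17)
+**     body:    07 68 69    (the integer 7, then the bytes 'h' and 'i')
+**
+** The sketch decodes the two body fields with sqlite3VdbeSerialGet().  The
+** example function name and record bytes are illustrative only.
+*/
+#if 0
+static void exampleDecodeRecordBody(void){
+  static const unsigned char aRec[] = { 0x03, 0x01, 0x11, 0x07, 'h', 'i' };
+  Mem m1, m2;
+  memset(&m1, 0, sizeof(m1));
+  memset(&m2, 0, sizeof(m2));
+  sqlite3VdbeSerialGet(&aRec[3], 1, &m1);   /* serial type 1: 8-bit integer */
+  sqlite3VdbeSerialGet(&aRec[4], 17, &m2);  /* serial type 17: 2-byte text  */
+  assert( m1.u.i==7 );
+  assert( m2.n==2 && memcmp(m2.z, "hi", 2)==0 );
+}
+#endif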
+
+#if SQLITE_DEBUG
+/*
+** This function compares two index or table record keys in the same way
+** as the sqlite3VdbeRecordCompare() routine. Unlike VdbeRecordCompare(),
+** this function deserializes and compares values using the
+** sqlite3VdbeSerialGet() and sqlite3MemCompare() functions. It is used
+** in assert() statements to ensure that the optimized code in
+** sqlite3VdbeRecordCompare() returns results consistent with these two
+** primitives.
+**
+** Return true if the result of comparison is equivalent to desiredResult.
+** Return false if there is a disagreement.
+*/
+static int vdbeRecordCompareDebug(
+ int nKey1, const void *pKey1, /* Left key */
+ const UnpackedRecord *pPKey2, /* Right key */
+ int desiredResult /* Correct answer */
+){
+ u32 d1; /* Offset into aKey[] of next data element */
+ u32 idx1; /* Offset into aKey[] of next header element */
+ u32 szHdr1; /* Number of bytes in header */
+ int i = 0;
+ int rc = 0;
+ const unsigned char *aKey1 = (const unsigned char *)pKey1;
+ KeyInfo *pKeyInfo;
+ Mem mem1;
+
+ pKeyInfo = pPKey2->pKeyInfo;
+ if( pKeyInfo->db==0 ) return 1;
+ mem1.enc = pKeyInfo->enc;
+ mem1.db = pKeyInfo->db;
+ /* mem1.flags = 0; // Will be initialized by sqlite3VdbeSerialGet() */
+ VVA_ONLY( mem1.szMalloc = 0; ) /* Only needed by assert() statements */
+
+ /* Compilers may complain that mem1.u.i is potentially uninitialized.
+ ** We could initialize it, as shown here, to silence those complaints.
+ ** But in fact, mem1.u.i will never actually be used uninitialized, and doing
+ ** the unnecessary initialization has a measurable negative performance
+ ** impact, since this routine is a very high runner. And so, we choose
+ ** to ignore the compiler warnings and leave this variable uninitialized.
+ */
+ /* mem1.u.i = 0; // not needed, here to silence compiler warning */
+
+ idx1 = getVarint32(aKey1, szHdr1);
+ if( szHdr1>98307 ) return SQLITE_CORRUPT;
+ d1 = szHdr1;
+ assert( pKeyInfo->nField+pKeyInfo->nXField>=pPKey2->nField || CORRUPT_DB );
+ assert( pKeyInfo->aSortOrder!=0 );
+ assert( pKeyInfo->nField>0 );
+ assert( idx1<=szHdr1 || CORRUPT_DB );
+ do{
+ u32 serial_type1;
+
+ /* Read the serial types for the next element in each key. */
+ idx1 += getVarint32( aKey1+idx1, serial_type1 );
+
+ /* Verify that there is enough key space remaining to avoid
+ ** a buffer overread. The "d1+serial_type1+2" subexpression will
+ ** always be greater than or equal to the amount of required key space.
+ ** Use that approximation to avoid the more expensive call to
+ ** sqlite3VdbeSerialTypeLen() in the common case.
+ */
+ if( d1+serial_type1+2>(u32)nKey1
+ && d1+sqlite3VdbeSerialTypeLen(serial_type1)>(u32)nKey1
+ ){
+ break;
+ }
+
+ /* Extract the values to be compared.
+ */
+ d1 += sqlite3VdbeSerialGet(&aKey1[d1], serial_type1, &mem1);
+
+ /* Do the comparison
+ */
+ rc = sqlite3MemCompare(&mem1, &pPKey2->aMem[i], pKeyInfo->aColl[i]);
+ if( rc!=0 ){
+ assert( mem1.szMalloc==0 ); /* See comment below */
+ if( pKeyInfo->aSortOrder[i] ){
+ rc = -rc; /* Invert the result for DESC sort order. */
+ }
+ goto debugCompareEnd;
+ }
+ i++;
+ }while( idx1<szHdr1 && i<pPKey2->nField );
+
+ /* No memory allocation is ever used on mem1. Prove this using
+ ** the following assert(). If the assert() fails, it indicates a
+ ** memory leak and a need to call sqlite3VdbeMemRelease(&mem1).
+ */
+ assert( mem1.szMalloc==0 );
+
+ /* rc==0 here means that one of the keys ran out of fields and
+ ** all the fields up to that point were equal. Return the default_rc
+ ** value. */
+ rc = pPKey2->default_rc;
+
+debugCompareEnd:
+ if( desiredResult==0 && rc==0 ) return 1;
+ if( desiredResult<0 && rc<0 ) return 1;
+ if( desiredResult>0 && rc>0 ) return 1;
+ if( CORRUPT_DB ) return 1;
+ if( pKeyInfo->db->mallocFailed ) return 1;
+ return 0;
+}
+#endif
+
+#if SQLITE_DEBUG
+/*
+** Count the number of fields (a.k.a. columns) in the record given by
+** pKey,nKey. Then verify that this count is less than or equal to the
+** limit given by pKeyInfo->nField + pKeyInfo->nXField.
+**
+** If this constraint is not satisfied, it means that the high-speed
+** vdbeRecordCompareInt() and vdbeRecordCompareString() routines will
+** not work correctly. If this assert() ever fires, it probably means
+** that the KeyInfo.nField or KeyInfo.nXField values were computed
+** incorrectly.
+*/
+static void vdbeAssertFieldCountWithinLimits(
+ int nKey, const void *pKey, /* The record to verify */
+ const KeyInfo *pKeyInfo /* Compare size with this KeyInfo */
+){
+ int nField = 0;
+ u32 szHdr;
+ u32 idx;
+ u32 notUsed;
+ const unsigned char *aKey = (const unsigned char*)pKey;
+
+ if( CORRUPT_DB ) return;
+ idx = getVarint32(aKey, szHdr);
+ assert( nKey>=0 );
+ assert( szHdr<=(u32)nKey );
+ while( idx<szHdr ){
+ idx += getVarint32(aKey+idx, notUsed);
+ nField++;
+ }
+ assert( nField <= pKeyInfo->nField+pKeyInfo->nXField );
+}
+#else
+# define vdbeAssertFieldCountWithinLimits(A,B,C)
+#endif
+
+/*
+** Both *pMem1 and *pMem2 contain string values. Compare the two values
+** using the collation sequence pColl. As usual, return a negative, zero,
+** or positive value if *pMem1 is less than, equal to, or greater than
+** *pMem2, respectively. Similar in spirit to "rc = (*pMem1) - (*pMem2);".
+*/
+static int vdbeCompareMemString(
+ const Mem *pMem1,
+ const Mem *pMem2,
+ const CollSeq *pColl,
+ u8 *prcErr /* If an OOM occurs, set to SQLITE_NOMEM */
+){
+ if( pMem1->enc==pColl->enc ){
+ /* The strings are already in the correct encoding. Call the
+ ** comparison function directly */
+ return pColl->xCmp(pColl->pUser,pMem1->n,pMem1->z,pMem2->n,pMem2->z);
+ }else{
+ int rc;
+ const void *v1, *v2;
+ int n1, n2;
+ Mem c1;
+ Mem c2;
+ sqlite3VdbeMemInit(&c1, pMem1->db, MEM_Null);
+ sqlite3VdbeMemInit(&c2, pMem1->db, MEM_Null);
+ sqlite3VdbeMemShallowCopy(&c1, pMem1, MEM_Ephem);
+ sqlite3VdbeMemShallowCopy(&c2, pMem2, MEM_Ephem);
+ v1 = sqlite3ValueText((sqlite3_value*)&c1, pColl->enc);
+ n1 = v1==0 ? 0 : c1.n;
+ v2 = sqlite3ValueText((sqlite3_value*)&c2, pColl->enc);
+ n2 = v2==0 ? 0 : c2.n;
+ rc = pColl->xCmp(pColl->pUser, n1, v1, n2, v2);
+ if( (v1==0 || v2==0) && prcErr ) *prcErr = SQLITE_NOMEM_BKPT;
+ sqlite3VdbeMemRelease(&c1);
+ sqlite3VdbeMemRelease(&c2);
+ return rc;
+ }
+}
+
+/*
+** The input is the content of a Blob value that is not marked with
+** MEM_Zero. Return true if all n bytes starting at z are zero, i.e. if
+** the value could have been represented as a zero-blob.
+*/
+static int isAllZero(const char *z, int n){
+ int i;
+ for(i=0; i<n; i++){
+ if( z[i] ) return 0;
+ }
+ return 1;
+}
+
+/*
+** Compare two blobs. Return negative, zero, or positive if the first
+** is less than, equal to, or greater than the second, respectively.
+** If one blob is a prefix of the other, then the shorter is the lesser.
+*/
+static SQLITE_NOINLINE int sqlite3BlobCompare(const Mem *pB1, const Mem *pB2){
+ int c;
+ int n1 = pB1->n;
+ int n2 = pB2->n;
+
+ /* It is possible to have a Blob value that has some non-zero content
+ ** followed by zero content. But that only comes up for Blobs formed
+ ** by the OP_MakeRecord opcode, and such Blobs never get passed into
+ ** sqlite3MemCompare(). */
+ assert( (pB1->flags & MEM_Zero)==0 || n1==0 );
+ assert( (pB2->flags & MEM_Zero)==0 || n2==0 );
+
+ if( (pB1->flags|pB2->flags) & MEM_Zero ){
+ if( pB1->flags & pB2->flags & MEM_Zero ){
+ return pB1->u.nZero - pB2->u.nZero;
+ }else if( pB1->flags & MEM_Zero ){
+ if( !isAllZero(pB2->z, pB2->n) ) return -1;
+ return pB1->u.nZero - n2;
+ }else{
+ if( !isAllZero(pB1->z, pB1->n) ) return +1;
+ return n1 - pB2->u.nZero;
+ }
+ }
+ c = memcmp(pB1->z, pB2->z, n1>n2 ? n2 : n1);
+ if( c ) return c;
+ return n1 - n2;
+}
+
+/*
+** Do a comparison between a 64-bit signed integer and a 64-bit floating-point
+** number. Return negative, zero, or positive if the first (i64) is less than,
+** equal to, or greater than the second (double).
+*/
+static int sqlite3IntFloatCompare(i64 i, double r){
+ if( sizeof(LONGDOUBLE_TYPE)>8 ){
+ LONGDOUBLE_TYPE x = (LONGDOUBLE_TYPE)i;
+ if( x<r ) return -1;
+ if( x>r ) return +1;
+ return 0;
+ }else{
+ i64 y;
+ double s;
+ if( r<-9223372036854775808.0 ) return +1;
+ if( r>9223372036854775807.0 ) return -1;
+ y = (i64)r;
+ if( i<y ) return -1;
+ if( i>y ){
+ if( y==SMALLEST_INT64 && r>0.0 ) return -1;
+ return +1;
+ }
+ s = (double)i;
+ if( s<r ) return -1;
+ if( s>r ) return +1;
+ return 0;
+ }
+}
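+
+/* Illustrative sketch (not part of the SQLite sources; compile-time
+** disabled).  It records the expected sign of sqlite3IntFloatCompare()
+** for a few simple inputs.  The example function name is illustrative only.
+*/
+#if 0
+static void exampleIntFloatCompare(void){
+  assert( sqlite3IntFloatCompare(3, 3.5)<0 );    /* 3  <  3.5 */
+  assert( sqlite3IntFloatCompare(4, 3.5)>0 );    /* 4  >  3.5 */
+  assert( sqlite3IntFloatCompare(2, 2.0)==0 );   /* 2  == 2.0 */
+}
+#endif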
+
+/*
+** Compare the values contained by the two memory cells, returning
+** negative, zero or positive if pMem1 is less than, equal to, or greater
+** than pMem2. Sorting order is NULLs first, followed by numbers (integers
+** and reals) sorted numerically, followed by text ordered by the collating
+** sequence pColl, and finally blobs ordered by memcmp().
+**
+** Two NULL values are considered equal by this function.
+*/
+SQLITE_PRIVATE int sqlite3MemCompare(const Mem *pMem1, const Mem *pMem2, const CollSeq *pColl){
+ int f1, f2;
+ int combined_flags;
+
+ f1 = pMem1->flags;
+ f2 = pMem2->flags;
+ combined_flags = f1|f2;
+ assert( (combined_flags & MEM_RowSet)==0 );
+
+ /* If one value is NULL, it is less than the other. If both values
+ ** are NULL, return 0.
+ */
+ if( combined_flags&MEM_Null ){
+ return (f2&MEM_Null) - (f1&MEM_Null);
+ }
+
+ /* At least one of the two values is a number
+ */
+ if( combined_flags&(MEM_Int|MEM_Real) ){
+ if( (f1 & f2 & MEM_Int)!=0 ){
+ if( pMem1->u.i < pMem2->u.i ) return -1;
+ if( pMem1->u.i > pMem2->u.i ) return +1;
+ return 0;
+ }
+ if( (f1 & f2 & MEM_Real)!=0 ){
+ if( pMem1->u.r < pMem2->u.r ) return -1;
+ if( pMem1->u.r > pMem2->u.r ) return +1;
+ return 0;
+ }
+ if( (f1&MEM_Int)!=0 ){
+ if( (f2&MEM_Real)!=0 ){
+ return sqlite3IntFloatCompare(pMem1->u.i, pMem2->u.r);
+ }else{
+ return -1;
+ }
+ }
+ if( (f1&MEM_Real)!=0 ){
+ if( (f2&MEM_Int)!=0 ){
+ return -sqlite3IntFloatCompare(pMem2->u.i, pMem1->u.r);
+ }else{
+ return -1;
+ }
+ }
+ return +1;
+ }
+
+ /* If one value is a string and the other is a blob, the string is less.
+ ** If both are strings, compare using the collating functions.
+ */
+ if( combined_flags&MEM_Str ){
+ if( (f1 & MEM_Str)==0 ){
+ return 1;
+ }
+ if( (f2 & MEM_Str)==0 ){
+ return -1;
+ }
+
+ assert( pMem1->enc==pMem2->enc || pMem1->db->mallocFailed );
+ assert( pMem1->enc==SQLITE_UTF8 ||
+ pMem1->enc==SQLITE_UTF16LE || pMem1->enc==SQLITE_UTF16BE );
+
+ /* The collation sequence must be defined at this point, even if
+ ** the user deletes the collation sequence after the vdbe program is
+ ** compiled (this was not always the case).
+ */
+ assert( !pColl || pColl->xCmp );
+
+ if( pColl ){
+ return vdbeCompareMemString(pMem1, pMem2, pColl, 0);
+ }
+ /* If a NULL pointer was passed as the collate function, fall through
+ ** to the blob case and use memcmp(). */
+ }
+
+ /* Both values must be blobs. Compare using memcmp(). */
+ return sqlite3BlobCompare(pMem1, pMem2);
+}
+
+
+/*
+** The first argument passed to this function is a serial-type that
+** corresponds to an integer - all values between 1 and 9 inclusive
+** except 7. The second points to a buffer containing an integer value
+** serialized according to serial_type. This function deserializes
+** and returns the value.
+*/
+static i64 vdbeRecordDecodeInt(u32 serial_type, const u8 *aKey){
+ u32 y;
+ assert( CORRUPT_DB || (serial_type>=1 && serial_type<=9 && serial_type!=7) );
+ switch( serial_type ){
+ case 0:
+ case 1:
+ testcase( aKey[0]&0x80 );
+ return ONE_BYTE_INT(aKey);
+ case 2:
+ testcase( aKey[0]&0x80 );
+ return TWO_BYTE_INT(aKey);
+ case 3:
+ testcase( aKey[0]&0x80 );
+ return THREE_BYTE_INT(aKey);
+ case 4: {
+ testcase( aKey[0]&0x80 );
+ y = FOUR_BYTE_UINT(aKey);
+ return (i64)*(int*)&y;
+ }
+ case 5: {
+ testcase( aKey[0]&0x80 );
+ return FOUR_BYTE_UINT(aKey+2) + (((i64)1)<<32)*TWO_BYTE_INT(aKey);
+ }
+ case 6: {
+ u64 x = FOUR_BYTE_UINT(aKey);
+ testcase( aKey[0]&0x80 );
+ x = (x<<32) | FOUR_BYTE_UINT(aKey+4);
+ return (i64)*(i64*)&x;
+ }
+ }
+
+ return (serial_type - 8);
+}
+
+/*
+** This function compares the two table rows or index records
+** specified by {nKey1, pKey1} and pPKey2. It returns a negative, zero
+** or positive integer if key1 is less than, equal to or
+** greater than key2. The {nKey1, pKey1} key must be a blob
+** created by the OP_MakeRecord opcode of the VDBE. The pPKey2
+** key must be a parsed key such as obtained from
+** sqlite3VdbeRecordUnpack().
+**
+** If argument bSkip is non-zero, it is assumed that the caller has already
+** determined that the first fields of the keys are equal.
+**
+** Key1 and Key2 do not have to contain the same number of fields. If all
+** fields that appear in both keys are equal, then pPKey2->default_rc is
+** returned.
+**
+** If database corruption is discovered, set pPKey2->errCode to
+** SQLITE_CORRUPT and return 0. If an OOM error is encountered,
+** pPKey2->errCode is set to SQLITE_NOMEM and, if it is not NULL, the
+** malloc-failed flag is set on the database handle (pPKey2->pKeyInfo->db).
+*/
+SQLITE_PRIVATE int sqlite3VdbeRecordCompareWithSkip(
+ int nKey1, const void *pKey1, /* Left key */
+ UnpackedRecord *pPKey2, /* Right key */
+ int bSkip /* If true, skip the first field */
+){
+ u32 d1; /* Offset into aKey[] of next data element */
+ int i; /* Index of next field to compare */
+ u32 szHdr1; /* Size of record header in bytes */
+ u32 idx1; /* Offset of first type in header */
+ int rc = 0; /* Return value */
+ Mem *pRhs = pPKey2->aMem; /* Next field of pPKey2 to compare */
+ KeyInfo *pKeyInfo = pPKey2->pKeyInfo;
+ const unsigned char *aKey1 = (const unsigned char *)pKey1;
+ Mem mem1;
+
+ /* If bSkip is true, then the caller has already determined that the first
+ ** two elements in the keys are equal. Fix the various stack variables so
+ ** that this routine begins comparing at the second field. */
+ if( bSkip ){
+ u32 s1;
+ idx1 = 1 + getVarint32(&aKey1[1], s1);
+ szHdr1 = aKey1[0];
+ d1 = szHdr1 + sqlite3VdbeSerialTypeLen(s1);
+ i = 1;
+ pRhs++;
+ }else{
+ idx1 = getVarint32(aKey1, szHdr1);
+ d1 = szHdr1;
+ if( d1>(unsigned)nKey1 ){
+ pPKey2->errCode = (u8)SQLITE_CORRUPT_BKPT;
+ return 0; /* Corruption */
+ }
+ i = 0;
+ }
+
+ VVA_ONLY( mem1.szMalloc = 0; ) /* Only needed by assert() statements */
+ assert( pPKey2->pKeyInfo->nField+pPKey2->pKeyInfo->nXField>=pPKey2->nField
+ || CORRUPT_DB );
+ assert( pPKey2->pKeyInfo->aSortOrder!=0 );
+ assert( pPKey2->pKeyInfo->nField>0 );
+ assert( idx1<=szHdr1 || CORRUPT_DB );
+ do{
+ u32 serial_type;
+
+ /* RHS is an integer */
+ if( pRhs->flags & MEM_Int ){
+ serial_type = aKey1[idx1];
+ testcase( serial_type==12 );
+ if( serial_type>=10 ){
+ rc = +1;
+ }else if( serial_type==0 ){
+ rc = -1;
+ }else if( serial_type==7 ){
+ sqlite3VdbeSerialGet(&aKey1[d1], serial_type, &mem1);
+ rc = -sqlite3IntFloatCompare(pRhs->u.i, mem1.u.r);
+ }else{
+ i64 lhs = vdbeRecordDecodeInt(serial_type, &aKey1[d1]);
+ i64 rhs = pRhs->u.i;
+ if( lhs<rhs ){
+ rc = -1;
+ }else if( lhs>rhs ){
+ rc = +1;
+ }
+ }
+ }
+
+ /* RHS is real */
+ else if( pRhs->flags & MEM_Real ){
+ serial_type = aKey1[idx1];
+ if( serial_type>=10 ){
+ /* Serial types 12 or greater are strings and blobs (greater than
+ ** numbers). Types 10 and 11 are currently "reserved for future
+ ** use", so it doesn't really matter what the results of comparing
+        ** them to numeric values are. */
+ rc = +1;
+ }else if( serial_type==0 ){
+ rc = -1;
+ }else{
+ sqlite3VdbeSerialGet(&aKey1[d1], serial_type, &mem1);
+ if( serial_type==7 ){
+ if( mem1.u.r<pRhs->u.r ){
+ rc = -1;
+ }else if( mem1.u.r>pRhs->u.r ){
+ rc = +1;
+ }
+ }else{
+ rc = sqlite3IntFloatCompare(mem1.u.i, pRhs->u.r);
+ }
+ }
+ }
+
+ /* RHS is a string */
+ else if( pRhs->flags & MEM_Str ){
+ getVarint32(&aKey1[idx1], serial_type);
+ testcase( serial_type==12 );
+ if( serial_type<12 ){
+ rc = -1;
+ }else if( !(serial_type & 0x01) ){
+ rc = +1;
+ }else{
+ mem1.n = (serial_type - 12) / 2;
+ testcase( (d1+mem1.n)==(unsigned)nKey1 );
+ testcase( (d1+mem1.n+1)==(unsigned)nKey1 );
+ if( (d1+mem1.n) > (unsigned)nKey1 ){
+ pPKey2->errCode = (u8)SQLITE_CORRUPT_BKPT;
+ return 0; /* Corruption */
+ }else if( pKeyInfo->aColl[i] ){
+ mem1.enc = pKeyInfo->enc;
+ mem1.db = pKeyInfo->db;
+ mem1.flags = MEM_Str;
+ mem1.z = (char*)&aKey1[d1];
+ rc = vdbeCompareMemString(
+ &mem1, pRhs, pKeyInfo->aColl[i], &pPKey2->errCode
+ );
+ }else{
+ int nCmp = MIN(mem1.n, pRhs->n);
+ rc = memcmp(&aKey1[d1], pRhs->z, nCmp);
+ if( rc==0 ) rc = mem1.n - pRhs->n;
+ }
+ }
+ }
+
+ /* RHS is a blob */
+ else if( pRhs->flags & MEM_Blob ){
+ assert( (pRhs->flags & MEM_Zero)==0 || pRhs->n==0 );
+ getVarint32(&aKey1[idx1], serial_type);
+ testcase( serial_type==12 );
+ if( serial_type<12 || (serial_type & 0x01) ){
+ rc = -1;
+ }else{
+ int nStr = (serial_type - 12) / 2;
+ testcase( (d1+nStr)==(unsigned)nKey1 );
+ testcase( (d1+nStr+1)==(unsigned)nKey1 );
+ if( (d1+nStr) > (unsigned)nKey1 ){
+ pPKey2->errCode = (u8)SQLITE_CORRUPT_BKPT;
+ return 0; /* Corruption */
+ }else if( pRhs->flags & MEM_Zero ){
+ if( !isAllZero((const char*)&aKey1[d1],nStr) ){
+ rc = 1;
+ }else{
+ rc = nStr - pRhs->u.nZero;
+ }
+ }else{
+ int nCmp = MIN(nStr, pRhs->n);
+ rc = memcmp(&aKey1[d1], pRhs->z, nCmp);
+ if( rc==0 ) rc = nStr - pRhs->n;
+ }
+ }
+ }
+
+ /* RHS is null */
+ else{
+ serial_type = aKey1[idx1];
+ rc = (serial_type!=0);
+ }
+
+ if( rc!=0 ){
+ if( pKeyInfo->aSortOrder[i] ){
+ rc = -rc;
+ }
+ assert( vdbeRecordCompareDebug(nKey1, pKey1, pPKey2, rc) );
+ assert( mem1.szMalloc==0 ); /* See comment below */
+ return rc;
+ }
+
+ i++;
+ pRhs++;
+ d1 += sqlite3VdbeSerialTypeLen(serial_type);
+ idx1 += sqlite3VarintLen(serial_type);
+ }while( idx1<(unsigned)szHdr1 && i<pPKey2->nField && d1<=(unsigned)nKey1 );
+
+ /* No memory allocation is ever used on mem1. Prove this using
+ ** the following assert(). If the assert() fails, it indicates a
+ ** memory leak and a need to call sqlite3VdbeMemRelease(&mem1). */
+ assert( mem1.szMalloc==0 );
+
+ /* rc==0 here means that one or both of the keys ran out of fields and
+ ** all the fields up to that point were equal. Return the default_rc
+ ** value. */
+ assert( CORRUPT_DB
+ || vdbeRecordCompareDebug(nKey1, pKey1, pPKey2, pPKey2->default_rc)
+ || pKeyInfo->db->mallocFailed
+ );
+ pPKey2->eqSeen = 1;
+ return pPKey2->default_rc;
+}
+SQLITE_PRIVATE int sqlite3VdbeRecordCompare(
+ int nKey1, const void *pKey1, /* Left key */
+ UnpackedRecord *pPKey2 /* Right key */
+){
+ return sqlite3VdbeRecordCompareWithSkip(nKey1, pKey1, pPKey2, 0);
+}
+
+
+/*
+** This function is an optimized version of sqlite3VdbeRecordCompare()
+** that assumes (a) the first field of pPKey2 is an integer, and (b) the
+** size-of-header varint at the start of (pKey1/nKey1) fits in a single
+** byte (i.e. is less than 128).
+**
+** To avoid concerns about buffer overreads, this routine is only used
+** on schemas where the maximum valid header size is 63 bytes or less.
+*/
+static int vdbeRecordCompareInt(
+ int nKey1, const void *pKey1, /* Left key */
+ UnpackedRecord *pPKey2 /* Right key */
+){
+ const u8 *aKey = &((const u8*)pKey1)[*(const u8*)pKey1 & 0x3F];
+ int serial_type = ((const u8*)pKey1)[1];
+ int res;
+ u32 y;
+ u64 x;
+ i64 v;
+ i64 lhs;
+
+ vdbeAssertFieldCountWithinLimits(nKey1, pKey1, pPKey2->pKeyInfo);
+ assert( (*(u8*)pKey1)<=0x3F || CORRUPT_DB );
+ switch( serial_type ){
+ case 1: { /* 1-byte signed integer */
+ lhs = ONE_BYTE_INT(aKey);
+ testcase( lhs<0 );
+ break;
+ }
+ case 2: { /* 2-byte signed integer */
+ lhs = TWO_BYTE_INT(aKey);
+ testcase( lhs<0 );
+ break;
+ }
+ case 3: { /* 3-byte signed integer */
+ lhs = THREE_BYTE_INT(aKey);
+ testcase( lhs<0 );
+ break;
+ }
+ case 4: { /* 4-byte signed integer */
+ y = FOUR_BYTE_UINT(aKey);
+ lhs = (i64)*(int*)&y;
+ testcase( lhs<0 );
+ break;
+ }
+ case 5: { /* 6-byte signed integer */
+ lhs = FOUR_BYTE_UINT(aKey+2) + (((i64)1)<<32)*TWO_BYTE_INT(aKey);
+ testcase( lhs<0 );
+ break;
+ }
+ case 6: { /* 8-byte signed integer */
+ x = FOUR_BYTE_UINT(aKey);
+ x = (x<<32) | FOUR_BYTE_UINT(aKey+4);
+ lhs = *(i64*)&x;
+ testcase( lhs<0 );
+ break;
+ }
+ case 8:
+ lhs = 0;
+ break;
+ case 9:
+ lhs = 1;
+ break;
+
+ /* This case could be removed without changing the results of running
+ ** this code. Including it causes gcc to generate a faster switch
+ ** statement (since the range of switch targets now starts at zero and
+ ** is contiguous) but does not cause any duplicate code to be generated
+ ** (as gcc is clever enough to combine the two like cases). Other
+ ** compilers might be similar. */
+ case 0: case 7:
+ return sqlite3VdbeRecordCompare(nKey1, pKey1, pPKey2);
+
+ default:
+ return sqlite3VdbeRecordCompare(nKey1, pKey1, pPKey2);
+ }
+
+ v = pPKey2->aMem[0].u.i;
+ if( v>lhs ){
+ res = pPKey2->r1;
+ }else if( v<lhs ){
+ res = pPKey2->r2;
+ }else if( pPKey2->nField>1 ){
+ /* The first fields of the two keys are equal. Compare the trailing
+ ** fields. */
+ res = sqlite3VdbeRecordCompareWithSkip(nKey1, pKey1, pPKey2, 1);
+ }else{
+ /* The first fields of the two keys are equal and there are no trailing
+ ** fields. Return pPKey2->default_rc in this case. */
+ res = pPKey2->default_rc;
+ pPKey2->eqSeen = 1;
+ }
+
+ assert( vdbeRecordCompareDebug(nKey1, pKey1, pPKey2, res) );
+ return res;
+}
+
+/*
+** This function is an optimized version of sqlite3VdbeRecordCompare()
+** that assumes (a) the first field of pPKey2 is a string, (b) the first
+** field uses the collation sequence BINARY, and (c) the size-of-header
+** varint at the start of (pKey1/nKey1) fits in a single byte.
+*/
+static int vdbeRecordCompareString(
+ int nKey1, const void *pKey1, /* Left key */
+ UnpackedRecord *pPKey2 /* Right key */
+){
+ const u8 *aKey1 = (const u8*)pKey1;
+ int serial_type;
+ int res;
+
+ assert( pPKey2->aMem[0].flags & MEM_Str );
+ vdbeAssertFieldCountWithinLimits(nKey1, pKey1, pPKey2->pKeyInfo);
+ getVarint32(&aKey1[1], serial_type);
+ if( serial_type<12 ){
+ res = pPKey2->r1; /* (pKey1/nKey1) is a number or a null */
+ }else if( !(serial_type & 0x01) ){
+ res = pPKey2->r2; /* (pKey1/nKey1) is a blob */
+ }else{
+ int nCmp;
+ int nStr;
+ int szHdr = aKey1[0];
+
+ nStr = (serial_type-12) / 2;
+ if( (szHdr + nStr) > nKey1 ){
+ pPKey2->errCode = (u8)SQLITE_CORRUPT_BKPT;
+ return 0; /* Corruption */
+ }
+ nCmp = MIN( pPKey2->aMem[0].n, nStr );
+ res = memcmp(&aKey1[szHdr], pPKey2->aMem[0].z, nCmp);
+
+ if( res==0 ){
+ res = nStr - pPKey2->aMem[0].n;
+ if( res==0 ){
+ if( pPKey2->nField>1 ){
+ res = sqlite3VdbeRecordCompareWithSkip(nKey1, pKey1, pPKey2, 1);
+ }else{
+ res = pPKey2->default_rc;
+ pPKey2->eqSeen = 1;
+ }
+ }else if( res>0 ){
+ res = pPKey2->r2;
+ }else{
+ res = pPKey2->r1;
+ }
+ }else if( res>0 ){
+ res = pPKey2->r2;
+ }else{
+ res = pPKey2->r1;
+ }
+ }
+
+ assert( vdbeRecordCompareDebug(nKey1, pKey1, pPKey2, res)
+ || CORRUPT_DB
+ || pPKey2->pKeyInfo->db->mallocFailed
+ );
+ return res;
+}
+
+/*
+** Return a pointer to an sqlite3VdbeRecordCompare() compatible function
+** suitable for comparing serialized records to the unpacked record passed
+** as the only argument.
+*/
+SQLITE_PRIVATE RecordCompare sqlite3VdbeFindCompare(UnpackedRecord *p){
+  /* vdbeRecordCompareInt() and vdbeRecordCompareString() both assume
+  ** that the size-of-header varint that occurs at the start of each record
+  ** fits in a single byte (i.e. is 127 or less). vdbeRecordCompareInt()
+  ** also assumes that it is safe to overread a buffer by at least the
+  ** maximum possible legal header size plus 8 bytes. Because there is
+  ** guaranteed to be at least 74 (but not 136) bytes of padding following
+  ** each buffer passed to vdbeRecordCompareInt(), this makes it convenient
+  ** to limit the size of the header to 64 bytes in cases where the first
+  ** field is an integer.
+ **
+ ** The easiest way to enforce this limit is to consider only records with
+ ** 13 fields or less. If the first field is an integer, the maximum legal
+ ** header size is (12*5 + 1 + 1) bytes. */
+ if( (p->pKeyInfo->nField + p->pKeyInfo->nXField)<=13 ){
+ int flags = p->aMem[0].flags;
+ if( p->pKeyInfo->aSortOrder[0] ){
+ p->r1 = 1;
+ p->r2 = -1;
+ }else{
+ p->r1 = -1;
+ p->r2 = 1;
+ }
+ if( (flags & MEM_Int) ){
+ return vdbeRecordCompareInt;
+ }
+ testcase( flags & MEM_Real );
+ testcase( flags & MEM_Null );
+ testcase( flags & MEM_Blob );
+ if( (flags & (MEM_Real|MEM_Null|MEM_Blob))==0 && p->pKeyInfo->aColl[0]==0 ){
+ assert( flags & MEM_Str );
+ return vdbeRecordCompareString;
+ }
+ }
+
+ return sqlite3VdbeRecordCompare;
+}
+
+/*
+** pCur points at an index entry created using the OP_MakeRecord opcode.
+** Read the rowid (the last field in the record) and store it in *rowid.
+** Return SQLITE_OK if everything works, or an error code otherwise.
+**
+** pCur might be pointing to text obtained from a corrupt database file.
+** So the content cannot be trusted. Do appropriate checks on the content.
+*/
+SQLITE_PRIVATE int sqlite3VdbeIdxRowid(sqlite3 *db, BtCursor *pCur, i64 *rowid){
+ i64 nCellKey = 0;
+ int rc;
+ u32 szHdr; /* Size of the header */
+ u32 typeRowid; /* Serial type of the rowid */
+ u32 lenRowid; /* Size of the rowid */
+ Mem m, v;
+
+  /* Get the size of the index entry.  Only index entries of less
+  ** than 2GiB are supported - anything larger must be database corruption.
+  ** Any corruption is detected in sqlite3BtreeParseCellPtr(), though, so
+  ** this code can safely assume that nCellKey is 32-bits.
+  */
+ assert( sqlite3BtreeCursorIsValid(pCur) );
+ nCellKey = sqlite3BtreePayloadSize(pCur);
+ assert( (nCellKey & SQLITE_MAX_U32)==(u64)nCellKey );
+
+ /* Read in the complete content of the index entry */
+ sqlite3VdbeMemInit(&m, db, 0);
+ rc = sqlite3VdbeMemFromBtree(pCur, 0, (u32)nCellKey, &m);
+ if( rc ){
+ return rc;
+ }
+
+ /* The index entry must begin with a header size */
+ (void)getVarint32((u8*)m.z, szHdr);
+ testcase( szHdr==3 );
+ testcase( szHdr==m.n );
+ if( unlikely(szHdr<3 || (int)szHdr>m.n) ){
+ goto idx_rowid_corruption;
+ }
+
+ /* The last field of the index should be an integer - the ROWID.
+ ** Verify that the last entry really is an integer. */
+ (void)getVarint32((u8*)&m.z[szHdr-1], typeRowid);
+ testcase( typeRowid==1 );
+ testcase( typeRowid==2 );
+ testcase( typeRowid==3 );
+ testcase( typeRowid==4 );
+ testcase( typeRowid==5 );
+ testcase( typeRowid==6 );
+ testcase( typeRowid==8 );
+ testcase( typeRowid==9 );
+ if( unlikely(typeRowid<1 || typeRowid>9 || typeRowid==7) ){
+ goto idx_rowid_corruption;
+ }
+ lenRowid = sqlite3SmallTypeSizes[typeRowid];
+ testcase( (u32)m.n==szHdr+lenRowid );
+ if( unlikely((u32)m.n<szHdr+lenRowid) ){
+ goto idx_rowid_corruption;
+ }
+
+ /* Fetch the integer off the end of the index record */
+ sqlite3VdbeSerialGet((u8*)&m.z[m.n-lenRowid], typeRowid, &v);
+ *rowid = v.u.i;
+ sqlite3VdbeMemRelease(&m);
+ return SQLITE_OK;
+
+ /* Jump here if database corruption is detected after m has been
+ ** allocated. Free the m object and return SQLITE_CORRUPT. */
+idx_rowid_corruption:
+ testcase( m.szMalloc!=0 );
+ sqlite3VdbeMemRelease(&m);
+ return SQLITE_CORRUPT_BKPT;
+}
+
+/*
+** Compare the key of the index entry that cursor pC is pointing to against
+** the key string in pUnpacked. Write into *pRes a number
+** that is negative, zero, or positive if pC is less than, equal to,
+** or greater than pUnpacked. Return SQLITE_OK on success.
+**
+** pUnpacked is either created without a rowid or is truncated so that it
+** omits the rowid at the end. The rowid at the end of the index entry
+** is ignored as well. Hence, this routine only compares the prefixes
+** of the keys prior to the final rowid, not the entire key.
+*/
+SQLITE_PRIVATE int sqlite3VdbeIdxKeyCompare(
+ sqlite3 *db, /* Database connection */
+ VdbeCursor *pC, /* The cursor to compare against */
+ UnpackedRecord *pUnpacked, /* Unpacked version of key */
+ int *res /* Write the comparison result here */
+){
+ i64 nCellKey = 0;
+ int rc;
+ BtCursor *pCur;
+ Mem m;
+
+ assert( pC->eCurType==CURTYPE_BTREE );
+ pCur = pC->uc.pCursor;
+ assert( sqlite3BtreeCursorIsValid(pCur) );
+ nCellKey = sqlite3BtreePayloadSize(pCur);
+ /* nCellKey will always be between 0 and 0xffffffff because of the way
+ ** that btreeParseCellPtr() and sqlite3GetVarint32() are implemented */
+ if( nCellKey<=0 || nCellKey>0x7fffffff ){
+ *res = 0;
+ return SQLITE_CORRUPT_BKPT;
+ }
+ sqlite3VdbeMemInit(&m, db, 0);
+ rc = sqlite3VdbeMemFromBtree(pCur, 0, (u32)nCellKey, &m);
+ if( rc ){
+ return rc;
+ }
+ *res = sqlite3VdbeRecordCompare(m.n, m.z, pUnpacked);
+ sqlite3VdbeMemRelease(&m);
+ return SQLITE_OK;
+}
+
+/*
+** This routine sets the value to be returned by subsequent calls to
+** sqlite3_changes() on the database handle 'db'.
+*/
+SQLITE_PRIVATE void sqlite3VdbeSetChanges(sqlite3 *db, int nChange){
+ assert( sqlite3_mutex_held(db->mutex) );
+ db->nChange = nChange;
+ db->nTotalChange += nChange;
+}
+
+/*
+** Set a flag in the vdbe to update the change counter when it is finalised
+** or reset.
+*/
+SQLITE_PRIVATE void sqlite3VdbeCountChanges(Vdbe *v){
+ v->changeCntOn = 1;
+}
+
+/*
+** Mark every prepared statement associated with a database connection
+** as expired.
+**
+** An expired statement means that recompilation of the statement is
+** recommended.  Statements expire when things happen that make their
+** programs obsolete. Removing user-defined functions or collating
+** sequences, or changing an authorization function are the types of
+** things that make prepared statements obsolete.
+*/
+SQLITE_PRIVATE void sqlite3ExpirePreparedStatements(sqlite3 *db){
+ Vdbe *p;
+ for(p = db->pVdbe; p; p=p->pNext){
+ p->expired = 1;
+ }
+}
+
+/*
+** Return the database associated with the Vdbe.
+*/
+SQLITE_PRIVATE sqlite3 *sqlite3VdbeDb(Vdbe *v){
+ return v->db;
+}
+
+/*
+** Return a pointer to an sqlite3_value structure containing the value bound
+** to parameter iVar of VM v. Except, if the value is an SQL NULL, return
+** 0 instead. Unless it is NULL, apply affinity aff (one of the SQLITE_AFF_*
+** constants) to the value before returning it.
+**
+** The returned value must be freed by the caller using sqlite3ValueFree().
+*/
+SQLITE_PRIVATE sqlite3_value *sqlite3VdbeGetBoundValue(Vdbe *v, int iVar, u8 aff){
+ assert( iVar>0 );
+ if( v ){
+ Mem *pMem = &v->aVar[iVar-1];
+ if( 0==(pMem->flags & MEM_Null) ){
+ sqlite3_value *pRet = sqlite3ValueNew(v->db);
+ if( pRet ){
+ sqlite3VdbeMemCopy((Mem *)pRet, pMem);
+ sqlite3ValueApplyAffinity(pRet, aff, SQLITE_UTF8);
+ }
+ return pRet;
+ }
+ }
+ return 0;
+}
+
+/*
+** Configure SQL variable iVar so that binding a new value to it signals
+** to the statement re-preparation logic that re-preparing the statement
+** may result in a better query plan.
+*/
+SQLITE_PRIVATE void sqlite3VdbeSetVarmask(Vdbe *v, int iVar){
+ assert( iVar>0 );
+ if( iVar>32 ){
+ v->expmask = 0xffffffff;
+ }else{
+ v->expmask |= ((u32)1 << (iVar-1));
+ }
+}
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+/*
+** Transfer error message text from an sqlite3_vtab.zErrMsg (text stored
+** in memory obtained from sqlite3_malloc) into a Vdbe.zErrMsg (text stored
+** in memory obtained from sqlite3DbMalloc).
+*/
+SQLITE_PRIVATE void sqlite3VtabImportErrmsg(Vdbe *p, sqlite3_vtab *pVtab){
+ if( pVtab->zErrMsg ){
+ sqlite3 *db = p->db;
+ sqlite3DbFree(db, p->zErrMsg);
+ p->zErrMsg = sqlite3DbStrDup(db, pVtab->zErrMsg);
+ sqlite3_free(pVtab->zErrMsg);
+ pVtab->zErrMsg = 0;
+ }
+}
+#endif /* SQLITE_OMIT_VIRTUALTABLE */
+
+#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
+
+/*
+** If the final argument is not NULL, release any allocations associated
+** with the memory cells in the p->aMem[] array. Also free the UnpackedRecord
+** structure itself, using sqlite3DbFree().
+**
+** This function is used to free UnpackedRecord structures allocated by
+** the vdbeUnpackRecord() function found in vdbeapi.c.
+*/
+static void vdbeFreeUnpacked(sqlite3 *db, int nField, UnpackedRecord *p){
+ if( p ){
+ int i;
+ for(i=0; i<nField; i++){
+ Mem *pMem = &p->aMem[i];
+ if( pMem->zMalloc ) sqlite3VdbeMemRelease(pMem);
+ }
+ sqlite3DbFree(db, p);
+ }
+}
+#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */
+
+#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
+/*
+** Invoke the pre-update hook. If this is an UPDATE or DELETE pre-update call,
+** then the cursor passed as the second argument should point to the row about
+** to be updated or deleted. If the application calls sqlite3_preupdate_old(),
+** the required value will be read from the row the cursor points to.
+*/
+SQLITE_PRIVATE void sqlite3VdbePreUpdateHook(
+ Vdbe *v, /* Vdbe pre-update hook is invoked by */
+ VdbeCursor *pCsr, /* Cursor to grab old.* values from */
+ int op, /* SQLITE_INSERT, UPDATE or DELETE */
+ const char *zDb, /* Database name */
+ Table *pTab, /* Modified table */
+ i64 iKey1, /* Initial key value */
+ int iReg /* Register for new.* record */
+){
+ sqlite3 *db = v->db;
+ i64 iKey2;
+ PreUpdate preupdate;
+ const char *zTbl = pTab->zName;
+ static const u8 fakeSortOrder = 0;
+
+ assert( db->pPreUpdate==0 );
+ memset(&preupdate, 0, sizeof(PreUpdate));
+ if( HasRowid(pTab)==0 ){
+ iKey1 = iKey2 = 0;
+ preupdate.pPk = sqlite3PrimaryKeyIndex(pTab);
+ }else{
+ if( op==SQLITE_UPDATE ){
+ iKey2 = v->aMem[iReg].u.i;
+ }else{
+ iKey2 = iKey1;
+ }
+ }
+
+ assert( pCsr->nField==pTab->nCol
+ || (pCsr->nField==pTab->nCol+1 && op==SQLITE_DELETE && iReg==-1)
+ );
+
+ preupdate.v = v;
+ preupdate.pCsr = pCsr;
+ preupdate.op = op;
+ preupdate.iNewReg = iReg;
+ preupdate.keyinfo.db = db;
+ preupdate.keyinfo.enc = ENC(db);
+ preupdate.keyinfo.nField = pTab->nCol;
+ preupdate.keyinfo.aSortOrder = (u8*)&fakeSortOrder;
+ preupdate.iKey1 = iKey1;
+ preupdate.iKey2 = iKey2;
+ preupdate.pTab = pTab;
+
+ db->pPreUpdate = &preupdate;
+ db->xPreUpdateCallback(db->pPreUpdateArg, db, op, zDb, zTbl, iKey1, iKey2);
+ db->pPreUpdate = 0;
+ sqlite3DbFree(db, preupdate.aRecord);
+ vdbeFreeUnpacked(db, preupdate.keyinfo.nField+1, preupdate.pUnpacked);
+ vdbeFreeUnpacked(db, preupdate.keyinfo.nField+1, preupdate.pNewUnpacked);
+ if( preupdate.aNew ){
+ int i;
+ for(i=0; i<pCsr->nField; i++){
+ sqlite3VdbeMemRelease(&preupdate.aNew[i]);
+ }
+ sqlite3DbFree(db, preupdate.aNew);
+ }
+}
+#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */
+
+/************** End of vdbeaux.c *********************************************/
+/************** Begin file vdbeapi.c *****************************************/
+/*
+** 2004 May 26
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+** This file contains code used to implement APIs that are part of the
+** VDBE.
+*/
+/* #include "sqliteInt.h" */
+/* #include "vdbeInt.h" */
+
+#ifndef SQLITE_OMIT_DEPRECATED
+/*
+** Return TRUE (non-zero) if the statement supplied as an argument needs
+** to be recompiled. A statement needs to be recompiled whenever the
+** execution environment changes in a way that would alter the program
+** that sqlite3_prepare() generates. For example, if new functions or
+** collating sequences are registered or if an authorizer function is
+** added or changed.
+*/
+SQLITE_API int sqlite3_expired(sqlite3_stmt *pStmt){
+ Vdbe *p = (Vdbe*)pStmt;
+ return p==0 || p->expired;
+}
+#endif
+
+/*
+** Check on a Vdbe to make sure it has not been finalized. Log
+** an error and return true if it has been finalized (or is otherwise
+** invalid). Return false if it is ok.
+*/
+static int vdbeSafety(Vdbe *p){
+ if( p->db==0 ){
+ sqlite3_log(SQLITE_MISUSE, "API called with finalized prepared statement");
+ return 1;
+ }else{
+ return 0;
+ }
+}
+static int vdbeSafetyNotNull(Vdbe *p){
+ if( p==0 ){
+ sqlite3_log(SQLITE_MISUSE, "API called with NULL prepared statement");
+ return 1;
+ }else{
+ return vdbeSafety(p);
+ }
+}
+
+#ifndef SQLITE_OMIT_TRACE
+/*
+** Invoke the profile callback. This routine is only called if we already
+** know that the profile callback is defined and needs to be invoked.
+*/
+static SQLITE_NOINLINE void invokeProfileCallback(sqlite3 *db, Vdbe *p){
+ sqlite3_int64 iNow;
+ sqlite3_int64 iElapse;
+ assert( p->startTime>0 );
+ assert( db->xProfile!=0 || (db->mTrace & SQLITE_TRACE_PROFILE)!=0 );
+ assert( db->init.busy==0 );
+ assert( p->zSql!=0 );
+ sqlite3OsCurrentTimeInt64(db->pVfs, &iNow);
+ iElapse = (iNow - p->startTime)*1000000;
+ if( db->xProfile ){
+ db->xProfile(db->pProfileArg, p->zSql, iElapse);
+ }
+ if( db->mTrace & SQLITE_TRACE_PROFILE ){
+ db->xTrace(SQLITE_TRACE_PROFILE, db->pTraceArg, p, (void*)&iElapse);
+ }
+ p->startTime = 0;
+}
+/*
+** The checkProfileCallback(DB,P) macro checks to see if a profile callback
+** is needed, and it invokes the callback if it is needed.
+*/
+# define checkProfileCallback(DB,P) \
+ if( ((P)->startTime)>0 ){ invokeProfileCallback(DB,P); }
+#else
+# define checkProfileCallback(DB,P) /*no-op*/
+#endif
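+
+/* Illustrative sketch (not part of the SQLite sources; compile-time
+** disabled).  It shows the application-side counterpart of the profile
+** machinery above: a callback registered with sqlite3_profile() receives
+** the statement text and the elapsed time in nanoseconds.  The example
+** callback name is illustrative only.
+*/
+#if 0
+static void exampleProfile(void *pArg, const char *zSql, sqlite3_uint64 ns){
+  (void)pArg;
+  printf("%llu ns: %s\n", (unsigned long long)ns, zSql);
+}
+/* For some open handle db, the callback would be registered with:
+**
+**     sqlite3_profile(db, exampleProfile, 0);
+*/
+#endif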
+
+/*
+** The following routine destroys a virtual machine that is created by
+** the sqlite3_prepare() family of routines. The integer returned is an
+** SQLITE_* success/failure code that describes the result of executing
+** the virtual machine.
+**
+** This routine sets the error code and string returned by
+** sqlite3_errcode(), sqlite3_errmsg() and sqlite3_errmsg16().
+*/
+SQLITE_API int sqlite3_finalize(sqlite3_stmt *pStmt){
+ int rc;
+ if( pStmt==0 ){
+ /* IMPLEMENTATION-OF: R-57228-12904 Invoking sqlite3_finalize() on a NULL
+ ** pointer is a harmless no-op. */
+ rc = SQLITE_OK;
+ }else{
+ Vdbe *v = (Vdbe*)pStmt;
+ sqlite3 *db = v->db;
+ if( vdbeSafety(v) ) return SQLITE_MISUSE_BKPT;
+ sqlite3_mutex_enter(db->mutex);
+ checkProfileCallback(db, v);
+ rc = sqlite3VdbeFinalize(v);
+ rc = sqlite3ApiExit(db, rc);
+ sqlite3LeaveMutexAndCloseZombie(db);
+ }
+ return rc;
+}
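+
+/* Illustrative sketch (not part of the SQLite sources; compile-time
+** disabled).  It shows the usual prepare/step/finalize lifecycle that
+** sqlite3_finalize(), sqlite3_reset() and sqlite3_step() implement, using
+** only public API calls.  The example function name and SQL text are
+** illustrative only.
+*/
+#if 0
+static int exampleStepLoop(sqlite3 *db){
+  sqlite3_stmt *pStmt = 0;
+  int rc = sqlite3_prepare_v2(db, "SELECT name FROM sqlite_master", -1,
+                              &pStmt, 0);
+  if( rc!=SQLITE_OK ) return rc;
+  while( (rc = sqlite3_step(pStmt))==SQLITE_ROW ){
+    printf("%s\n", (const char*)sqlite3_column_text(pStmt, 0));
+  }
+  sqlite3_finalize(pStmt);   /* safe after SQLITE_DONE or after an error */
+  return rc==SQLITE_DONE ? SQLITE_OK : rc;
+}
+#endif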
+
+/*
+** Terminate the current execution of an SQL statement and reset it
+** back to its starting state so that it can be reused. A success code from
+** the prior execution is returned.
+**
+** This routine sets the error code and string returned by
+** sqlite3_errcode(), sqlite3_errmsg() and sqlite3_errmsg16().
+*/
+SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt){
+ int rc;
+ if( pStmt==0 ){
+ rc = SQLITE_OK;
+ }else{
+ Vdbe *v = (Vdbe*)pStmt;
+ sqlite3 *db = v->db;
+ sqlite3_mutex_enter(db->mutex);
+ checkProfileCallback(db, v);
+ rc = sqlite3VdbeReset(v);
+ sqlite3VdbeRewind(v);
+ assert( (rc & (db->errMask))==rc );
+ rc = sqlite3ApiExit(db, rc);
+ sqlite3_mutex_leave(db->mutex);
+ }
+ return rc;
+}
+
+/*
+** Set all the parameters in the compiled SQL statement to NULL.
+*/
+SQLITE_API int sqlite3_clear_bindings(sqlite3_stmt *pStmt){
+ int i;
+ int rc = SQLITE_OK;
+ Vdbe *p = (Vdbe*)pStmt;
+#if SQLITE_THREADSAFE
+ sqlite3_mutex *mutex = ((Vdbe*)pStmt)->db->mutex;
+#endif
+ sqlite3_mutex_enter(mutex);
+ for(i=0; i<p->nVar; i++){
+ sqlite3VdbeMemRelease(&p->aVar[i]);
+ p->aVar[i].flags = MEM_Null;
+ }
+ if( p->isPrepareV2 && p->expmask ){
+ p->expired = 1;
+ }
+ sqlite3_mutex_leave(mutex);
+ return rc;
+}
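+
+/* Illustrative sketch (not part of the SQLite sources; compile-time
+** disabled).  It shows how sqlite3_clear_bindings() pairs with
+** sqlite3_reset() when a prepared statement is reused: reset() rewinds the
+** program, clear_bindings() sets every parameter back to NULL.  The example
+** function name is illustrative only.
+*/
+#if 0
+static void exampleReuseStatement(sqlite3_stmt *pStmt){
+  sqlite3_bind_int(pStmt, 1, 42);              /* bind the first parameter */
+  while( sqlite3_step(pStmt)==SQLITE_ROW ){}   /* consume any result rows  */
+  sqlite3_reset(pStmt);                        /* rewind for the next run  */
+  sqlite3_clear_bindings(pStmt);               /* all parameters are NULL  */
+}
+#endif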
+
+
+/**************************** sqlite3_value_ *******************************
+** The following routines extract information from a Mem or sqlite3_value
+** structure.
+*/
+SQLITE_API const void *sqlite3_value_blob(sqlite3_value *pVal){
+ Mem *p = (Mem*)pVal;
+ if( p->flags & (MEM_Blob|MEM_Str) ){
+ if( ExpandBlob(p)!=SQLITE_OK ){
+ assert( p->flags==MEM_Null && p->z==0 );
+ return 0;
+ }
+ p->flags |= MEM_Blob;
+ return p->n ? p->z : 0;
+ }else{
+ return sqlite3_value_text(pVal);
+ }
+}
+SQLITE_API int sqlite3_value_bytes(sqlite3_value *pVal){
+ return sqlite3ValueBytes(pVal, SQLITE_UTF8);
+}
+SQLITE_API int sqlite3_value_bytes16(sqlite3_value *pVal){
+ return sqlite3ValueBytes(pVal, SQLITE_UTF16NATIVE);
+}
+SQLITE_API double sqlite3_value_double(sqlite3_value *pVal){
+ return sqlite3VdbeRealValue((Mem*)pVal);
+}
+SQLITE_API int sqlite3_value_int(sqlite3_value *pVal){
+ return (int)sqlite3VdbeIntValue((Mem*)pVal);
+}
+SQLITE_API sqlite_int64 sqlite3_value_int64(sqlite3_value *pVal){
+ return sqlite3VdbeIntValue((Mem*)pVal);
+}
+SQLITE_API unsigned int sqlite3_value_subtype(sqlite3_value *pVal){
+ Mem *pMem = (Mem*)pVal;
+ return ((pMem->flags & MEM_Subtype) ? pMem->eSubtype : 0);
+}
+SQLITE_API const unsigned char *sqlite3_value_text(sqlite3_value *pVal){
+ return (const unsigned char *)sqlite3ValueText(pVal, SQLITE_UTF8);
+}
+#ifndef SQLITE_OMIT_UTF16
+SQLITE_API const void *sqlite3_value_text16(sqlite3_value* pVal){
+ return sqlite3ValueText(pVal, SQLITE_UTF16NATIVE);
+}
+SQLITE_API const void *sqlite3_value_text16be(sqlite3_value *pVal){
+ return sqlite3ValueText(pVal, SQLITE_UTF16BE);
+}
+SQLITE_API const void *sqlite3_value_text16le(sqlite3_value *pVal){
+ return sqlite3ValueText(pVal, SQLITE_UTF16LE);
+}
+#endif /* SQLITE_OMIT_UTF16 */
+/* EVIDENCE-OF: R-12793-43283 Every value in SQLite has one of five
+** fundamental datatypes: 64-bit signed integer 64-bit IEEE floating
+** point number string BLOB NULL
+*/
+SQLITE_API int sqlite3_value_type(sqlite3_value* pVal){
+ static const u8 aType[] = {
+ SQLITE_BLOB, /* 0x00 */
+ SQLITE_NULL, /* 0x01 */
+ SQLITE_TEXT, /* 0x02 */
+ SQLITE_NULL, /* 0x03 */
+ SQLITE_INTEGER, /* 0x04 */
+ SQLITE_NULL, /* 0x05 */
+ SQLITE_INTEGER, /* 0x06 */
+ SQLITE_NULL, /* 0x07 */
+ SQLITE_FLOAT, /* 0x08 */
+ SQLITE_NULL, /* 0x09 */
+ SQLITE_FLOAT, /* 0x0a */
+ SQLITE_NULL, /* 0x0b */
+ SQLITE_INTEGER, /* 0x0c */
+ SQLITE_NULL, /* 0x0d */
+ SQLITE_INTEGER, /* 0x0e */
+ SQLITE_NULL, /* 0x0f */
+ SQLITE_BLOB, /* 0x10 */
+ SQLITE_NULL, /* 0x11 */
+ SQLITE_TEXT, /* 0x12 */
+ SQLITE_NULL, /* 0x13 */
+ SQLITE_INTEGER, /* 0x14 */
+ SQLITE_NULL, /* 0x15 */
+ SQLITE_INTEGER, /* 0x16 */
+ SQLITE_NULL, /* 0x17 */
+ SQLITE_FLOAT, /* 0x18 */
+ SQLITE_NULL, /* 0x19 */
+ SQLITE_FLOAT, /* 0x1a */
+ SQLITE_NULL, /* 0x1b */
+ SQLITE_INTEGER, /* 0x1c */
+ SQLITE_NULL, /* 0x1d */
+ SQLITE_INTEGER, /* 0x1e */
+ SQLITE_NULL, /* 0x1f */
+ };
+ return aType[pVal->flags&MEM_AffMask];
+}
+
+/* Make a copy of an sqlite3_value object
+*/
+SQLITE_API sqlite3_value *sqlite3_value_dup(const sqlite3_value *pOrig){
+ sqlite3_value *pNew;
+ if( pOrig==0 ) return 0;
+ pNew = sqlite3_malloc( sizeof(*pNew) );
+ if( pNew==0 ) return 0;
+ memset(pNew, 0, sizeof(*pNew));
+ memcpy(pNew, pOrig, MEMCELLSIZE);
+ pNew->flags &= ~MEM_Dyn;
+ pNew->db = 0;
+ if( pNew->flags&(MEM_Str|MEM_Blob) ){
+ pNew->flags &= ~(MEM_Static|MEM_Dyn);
+ pNew->flags |= MEM_Ephem;
+ if( sqlite3VdbeMemMakeWriteable(pNew)!=SQLITE_OK ){
+ sqlite3ValueFree(pNew);
+ pNew = 0;
+ }
+ }
+ return pNew;
+}
+
+/* Destroy an sqlite3_value object previously obtained from
+** sqlite3_value_dup().
+*/
+SQLITE_API void sqlite3_value_free(sqlite3_value *pOld){
+ sqlite3ValueFree(pOld);
+}
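+
+/* Illustrative sketch (not part of the SQLite sources; compile-time
+** disabled).  Inside a user-defined SQL function, sqlite3_value_dup() takes
+** a private copy of an argument value that would otherwise become invalid
+** when the function returns, and sqlite3_value_free() releases that copy.
+** The example function name is illustrative only.
+*/
+#if 0
+static void exampleKeepArgument(
+  sqlite3_context *ctx,
+  int argc,
+  sqlite3_value **argv
+){
+  sqlite3_value *pCopy;
+  assert( argc==1 );
+  pCopy = sqlite3_value_dup(argv[0]);      /* returns NULL on OOM */
+  if( pCopy==0 ){
+    sqlite3_result_error_nomem(ctx);
+    return;
+  }
+  /* ... pCopy may now be stored and inspected after this call returns ... */
+  sqlite3_value_free(pCopy);
+  sqlite3_result_null(ctx);
+}
+#endif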
+
+
+/**************************** sqlite3_result_ *******************************
+** The following routines are used by user-defined functions to specify
+** the function result.
+**
+** The setResultStrOrError() function calls sqlite3VdbeMemSetStr() to store
+** the result as a string or blob.  If the string or blob is too large, it
+** sets the error code to SQLITE_TOOBIG.
+**
+** The invokeValueDestructor(P,X) routine invokes the destructor function X()
+** on value P when that value is not going to be used and needs to be
+** destroyed.
+*/
+static void setResultStrOrError(
+ sqlite3_context *pCtx, /* Function context */
+ const char *z, /* String pointer */
+ int n, /* Bytes in string, or negative */
+ u8 enc, /* Encoding of z. 0 for BLOBs */
+ void (*xDel)(void*) /* Destructor function */
+){
+ if( sqlite3VdbeMemSetStr(pCtx->pOut, z, n, enc, xDel)==SQLITE_TOOBIG ){
+ sqlite3_result_error_toobig(pCtx);
+ }
+}
+static int invokeValueDestructor(
+ const void *p, /* Value to destroy */
+ void (*xDel)(void*), /* The destructor */
+  sqlite3_context *pCtx      /* Set a SQLITE_TOOBIG error if not NULL */
+){
+ assert( xDel!=SQLITE_DYNAMIC );
+ if( xDel==0 ){
+ /* noop */
+ }else if( xDel==SQLITE_TRANSIENT ){
+ /* noop */
+ }else{
+ xDel((void*)p);
+ }
+ if( pCtx ) sqlite3_result_error_toobig(pCtx);
+ return SQLITE_TOOBIG;
+}
+SQLITE_API void sqlite3_result_blob(
+ sqlite3_context *pCtx,
+ const void *z,
+ int n,
+ void (*xDel)(void *)
+){
+ assert( n>=0 );
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ setResultStrOrError(pCtx, z, n, 0, xDel);
+}
+SQLITE_API void sqlite3_result_blob64(
+ sqlite3_context *pCtx,
+ const void *z,
+ sqlite3_uint64 n,
+ void (*xDel)(void *)
+){
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ assert( xDel!=SQLITE_DYNAMIC );
+ if( n>0x7fffffff ){
+ (void)invokeValueDestructor(z, xDel, pCtx);
+ }else{
+ setResultStrOrError(pCtx, z, (int)n, 0, xDel);
+ }
+}
+SQLITE_API void sqlite3_result_double(sqlite3_context *pCtx, double rVal){
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ sqlite3VdbeMemSetDouble(pCtx->pOut, rVal);
+}
+SQLITE_API void sqlite3_result_error(sqlite3_context *pCtx, const char *z, int n){
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ pCtx->isError = SQLITE_ERROR;
+ pCtx->fErrorOrAux = 1;
+ sqlite3VdbeMemSetStr(pCtx->pOut, z, n, SQLITE_UTF8, SQLITE_TRANSIENT);
+}
+#ifndef SQLITE_OMIT_UTF16
+SQLITE_API void sqlite3_result_error16(sqlite3_context *pCtx, const void *z, int n){
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ pCtx->isError = SQLITE_ERROR;
+ pCtx->fErrorOrAux = 1;
+ sqlite3VdbeMemSetStr(pCtx->pOut, z, n, SQLITE_UTF16NATIVE, SQLITE_TRANSIENT);
+}
+#endif
+SQLITE_API void sqlite3_result_int(sqlite3_context *pCtx, int iVal){
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ sqlite3VdbeMemSetInt64(pCtx->pOut, (i64)iVal);
+}
+SQLITE_API void sqlite3_result_int64(sqlite3_context *pCtx, i64 iVal){
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ sqlite3VdbeMemSetInt64(pCtx->pOut, iVal);
+}
+SQLITE_API void sqlite3_result_null(sqlite3_context *pCtx){
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ sqlite3VdbeMemSetNull(pCtx->pOut);
+}
+SQLITE_API void sqlite3_result_subtype(sqlite3_context *pCtx, unsigned int eSubtype){
+ Mem *pOut = pCtx->pOut;
+ assert( sqlite3_mutex_held(pOut->db->mutex) );
+ pOut->eSubtype = eSubtype & 0xff;
+ pOut->flags |= MEM_Subtype;
+}
+SQLITE_API void sqlite3_result_text(
+ sqlite3_context *pCtx,
+ const char *z,
+ int n,
+ void (*xDel)(void *)
+){
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ setResultStrOrError(pCtx, z, n, SQLITE_UTF8, xDel);
+}
+SQLITE_API void sqlite3_result_text64(
+ sqlite3_context *pCtx,
+ const char *z,
+ sqlite3_uint64 n,
+ void (*xDel)(void *),
+ unsigned char enc
+){
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ assert( xDel!=SQLITE_DYNAMIC );
+ if( enc==SQLITE_UTF16 ) enc = SQLITE_UTF16NATIVE;
+ if( n>0x7fffffff ){
+ (void)invokeValueDestructor(z, xDel, pCtx);
+ }else{
+ setResultStrOrError(pCtx, z, (int)n, enc, xDel);
+ }
+}
+#ifndef SQLITE_OMIT_UTF16
+SQLITE_API void sqlite3_result_text16(
+ sqlite3_context *pCtx,
+ const void *z,
+ int n,
+ void (*xDel)(void *)
+){
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ setResultStrOrError(pCtx, z, n, SQLITE_UTF16NATIVE, xDel);
+}
+SQLITE_API void sqlite3_result_text16be(
+ sqlite3_context *pCtx,
+ const void *z,
+ int n,
+ void (*xDel)(void *)
+){
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ setResultStrOrError(pCtx, z, n, SQLITE_UTF16BE, xDel);
+}
+SQLITE_API void sqlite3_result_text16le(
+ sqlite3_context *pCtx,
+ const void *z,
+ int n,
+ void (*xDel)(void *)
+){
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ setResultStrOrError(pCtx, z, n, SQLITE_UTF16LE, xDel);
+}
+#endif /* SQLITE_OMIT_UTF16 */
+SQLITE_API void sqlite3_result_value(sqlite3_context *pCtx, sqlite3_value *pValue){
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ sqlite3VdbeMemCopy(pCtx->pOut, pValue);
+}
+SQLITE_API void sqlite3_result_zeroblob(sqlite3_context *pCtx, int n){
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ sqlite3VdbeMemSetZeroBlob(pCtx->pOut, n);
+}
+SQLITE_API int sqlite3_result_zeroblob64(sqlite3_context *pCtx, u64 n){
+ Mem *pOut = pCtx->pOut;
+ assert( sqlite3_mutex_held(pOut->db->mutex) );
+ if( n>(u64)pOut->db->aLimit[SQLITE_LIMIT_LENGTH] ){
+ return SQLITE_TOOBIG;
+ }
+ sqlite3VdbeMemSetZeroBlob(pCtx->pOut, (int)n);
+ return SQLITE_OK;
+}
+SQLITE_API void sqlite3_result_error_code(sqlite3_context *pCtx, int errCode){
+ pCtx->isError = errCode;
+ pCtx->fErrorOrAux = 1;
+#ifdef SQLITE_DEBUG
+ if( pCtx->pVdbe ) pCtx->pVdbe->rcApp = errCode;
+#endif
+ if( pCtx->pOut->flags & MEM_Null ){
+ sqlite3VdbeMemSetStr(pCtx->pOut, sqlite3ErrStr(errCode), -1,
+ SQLITE_UTF8, SQLITE_STATIC);
+ }
+}
+
+/* Force an SQLITE_TOOBIG error. */
+SQLITE_API void sqlite3_result_error_toobig(sqlite3_context *pCtx){
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ pCtx->isError = SQLITE_TOOBIG;
+ pCtx->fErrorOrAux = 1;
+ sqlite3VdbeMemSetStr(pCtx->pOut, "string or blob too big", -1,
+ SQLITE_UTF8, SQLITE_STATIC);
+}
+
+/* An SQLITE_NOMEM error. */
+SQLITE_API void sqlite3_result_error_nomem(sqlite3_context *pCtx){
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ sqlite3VdbeMemSetNull(pCtx->pOut);
+ pCtx->isError = SQLITE_NOMEM_BKPT;
+ pCtx->fErrorOrAux = 1;
+ sqlite3OomFault(pCtx->pOut->db);
+}
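+
+/* Illustrative sketch (not part of the SQLite sources; compile-time
+** disabled).  A minimal scalar SQL function built on the sqlite3_result_*
+** routines above: it returns twice its integer argument, or NULL for a
+** NULL argument.  The example function and SQL names are illustrative only.
+*/
+#if 0
+static void exampleDoubleFunc(
+  sqlite3_context *ctx,
+  int argc,
+  sqlite3_value **argv
+){
+  assert( argc==1 );
+  if( sqlite3_value_type(argv[0])==SQLITE_NULL ){
+    sqlite3_result_null(ctx);
+  }else{
+    sqlite3_result_int64(ctx, 2*sqlite3_value_int64(argv[0]));
+  }
+}
+/* For some open handle db, the function would be registered with:
+**
+**     sqlite3_create_function(db, "double_it", 1, SQLITE_UTF8, 0,
+**                             exampleDoubleFunc, 0, 0);
+*/
+#endif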
+
+/*
+** This function is called after a transaction has been committed. It
+** invokes callbacks registered with sqlite3_wal_hook() as required.
+*/
+static int doWalCallbacks(sqlite3 *db){
+ int rc = SQLITE_OK;
+#ifndef SQLITE_OMIT_WAL
+ int i;
+ for(i=0; i<db->nDb; i++){
+ Btree *pBt = db->aDb[i].pBt;
+ if( pBt ){
+ int nEntry;
+ sqlite3BtreeEnter(pBt);
+ nEntry = sqlite3PagerWalCallback(sqlite3BtreePager(pBt));
+ sqlite3BtreeLeave(pBt);
+ if( db->xWalCallback && nEntry>0 && rc==SQLITE_OK ){
+ rc = db->xWalCallback(db->pWalArg, db, db->aDb[i].zDbSName, nEntry);
+ }
+ }
+ }
+#endif
+ return rc;
+}
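+
+/* Illustrative sketch (not part of the SQLite sources; compile-time
+** disabled).  The application-side counterpart of doWalCallbacks(): a hook
+** registered with sqlite3_wal_hook() learns how many frames are in the
+** write-ahead log after each commit and may choose to checkpoint.  The
+** example callback name and the 1000-frame threshold are illustrative only.
+*/
+#if 0
+static int exampleWalHook(void *pArg, sqlite3 *db, const char *zDb, int nFrame){
+  (void)pArg;
+  if( nFrame>=1000 ){
+    sqlite3_wal_checkpoint_v2(db, zDb, SQLITE_CHECKPOINT_PASSIVE, 0, 0);
+  }
+  return SQLITE_OK;
+}
+/* For some open handle db, the hook would be registered with:
+**
+**     sqlite3_wal_hook(db, exampleWalHook, 0);
+*/
+#endif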
+
+
+/*
+** Execute the statement pStmt, either until a row of data is ready, the
+** statement is completely executed or an error occurs.
+**
+** This routine implements the bulk of the logic behind the sqlite3_step()
+** API. The only thing omitted is the automatic recompile if a
+** schema change has occurred. That detail is handled by the
+** outer sqlite3_step() wrapper procedure.
+*/
+static int sqlite3Step(Vdbe *p){
+ sqlite3 *db;
+ int rc;
+
+ assert(p);
+ if( p->magic!=VDBE_MAGIC_RUN ){
+ /* We used to require that sqlite3_reset() be called before retrying
+ ** sqlite3_step() after any error or after SQLITE_DONE. But beginning
+ ** with version 3.7.0, we changed this so that sqlite3_reset() would
+ ** be called automatically instead of throwing the SQLITE_MISUSE error.
+ ** This "automatic-reset" change is not technically an incompatibility,
+ ** since any application that receives an SQLITE_MISUSE is broken by
+ ** definition.
+ **
+ ** Nevertheless, some published applications that were originally written
+ ** for version 3.6.23 or earlier do in fact depend on SQLITE_MISUSE
+    ** returns, and those were broken by the automatic-reset change. As a
+    ** work-around, the SQLITE_OMIT_AUTORESET compile-time option restores the
+ ** legacy behavior of returning SQLITE_MISUSE for cases where the
+ ** previous sqlite3_step() returned something other than a SQLITE_LOCKED
+ ** or SQLITE_BUSY error.
+ */
+#ifdef SQLITE_OMIT_AUTORESET
+ if( (rc = p->rc&0xff)==SQLITE_BUSY || rc==SQLITE_LOCKED ){
+ sqlite3_reset((sqlite3_stmt*)p);
+ }else{
+ return SQLITE_MISUSE_BKPT;
+ }
+#else
+ sqlite3_reset((sqlite3_stmt*)p);
+#endif
+ }
+
+ /* Check that malloc() has not failed. If it has, return early. */
+ db = p->db;
+ if( db->mallocFailed ){
+ p->rc = SQLITE_NOMEM;
+ return SQLITE_NOMEM_BKPT;
+ }
+
+ if( p->pc<=0 && p->expired ){
+ p->rc = SQLITE_SCHEMA;
+ rc = SQLITE_ERROR;
+ goto end_of_step;
+ }
+ if( p->pc<0 ){
+ /* If there are no other statements currently running, then
+ ** reset the interrupt flag. This prevents a call to sqlite3_interrupt
+ ** from interrupting a statement that has not yet started.
+ */
+ if( db->nVdbeActive==0 ){
+ db->u1.isInterrupted = 0;
+ }
+
+ assert( db->nVdbeWrite>0 || db->autoCommit==0
+ || (db->nDeferredCons==0 && db->nDeferredImmCons==0)
+ );
+
+#ifndef SQLITE_OMIT_TRACE
+ if( (db->xProfile || (db->mTrace & SQLITE_TRACE_PROFILE)!=0)
+ && !db->init.busy && p->zSql ){
+ sqlite3OsCurrentTimeInt64(db->pVfs, &p->startTime);
+ }else{
+ assert( p->startTime==0 );
+ }
+#endif
+
+ db->nVdbeActive++;
+ if( p->readOnly==0 ) db->nVdbeWrite++;
+ if( p->bIsReader ) db->nVdbeRead++;
+ p->pc = 0;
+ }
+#ifdef SQLITE_DEBUG
+ p->rcApp = SQLITE_OK;
+#endif
+#ifndef SQLITE_OMIT_EXPLAIN
+ if( p->explain ){
+ rc = sqlite3VdbeList(p);
+ }else
+#endif /* SQLITE_OMIT_EXPLAIN */
+ {
+ db->nVdbeExec++;
+ rc = sqlite3VdbeExec(p);
+ db->nVdbeExec--;
+ }
+
+#ifndef SQLITE_OMIT_TRACE
+  /* If the statement completed, successfully or not, invoke the profile callback */
+ if( rc!=SQLITE_ROW ) checkProfileCallback(db, p);
+#endif
+
+ if( rc==SQLITE_DONE ){
+ assert( p->rc==SQLITE_OK );
+ p->rc = doWalCallbacks(db);
+ if( p->rc!=SQLITE_OK ){
+ rc = SQLITE_ERROR;
+ }
+ }
+
+ db->errCode = rc;
+ if( SQLITE_NOMEM==sqlite3ApiExit(p->db, p->rc) ){
+ p->rc = SQLITE_NOMEM_BKPT;
+ }
+end_of_step:
+ /* At this point local variable rc holds the value that should be
+ ** returned if this statement was compiled using the legacy
+ ** sqlite3_prepare() interface. According to the docs, this can only
+ ** be one of the values in the first assert() below. Variable p->rc
+ ** contains the value that would be returned if sqlite3_finalize()
+ ** were called on statement p.
+ */
+ assert( rc==SQLITE_ROW || rc==SQLITE_DONE || rc==SQLITE_ERROR
+ || (rc&0xff)==SQLITE_BUSY || rc==SQLITE_MISUSE
+ );
+ assert( (p->rc!=SQLITE_ROW && p->rc!=SQLITE_DONE) || p->rc==p->rcApp );
+ if( p->isPrepareV2 && rc!=SQLITE_ROW && rc!=SQLITE_DONE ){
+ /* If this statement was prepared using sqlite3_prepare_v2(), and an
+ ** error has occurred, then return the error code in p->rc to the
+ ** caller. Set the error code in the database handle to the same value.
+ */
+ rc = sqlite3VdbeTransferError(p);
+ }
+ return (rc&db->errMask);
+}
+
+/*
+** This is the top-level implementation of sqlite3_step(). Call
+** sqlite3Step() to do most of the work. If a schema error occurs,
+** call sqlite3Reprepare() and try again.
+*/
+SQLITE_API int sqlite3_step(sqlite3_stmt *pStmt){
+ int rc = SQLITE_OK; /* Result from sqlite3Step() */
+ int rc2 = SQLITE_OK; /* Result from sqlite3Reprepare() */
+ Vdbe *v = (Vdbe*)pStmt; /* the prepared statement */
+ int cnt = 0; /* Counter to prevent infinite loop of reprepares */
+ sqlite3 *db; /* The database connection */
+
+ if( vdbeSafetyNotNull(v) ){
+ return SQLITE_MISUSE_BKPT;
+ }
+ db = v->db;
+ sqlite3_mutex_enter(db->mutex);
+ v->doingRerun = 0;
+ while( (rc = sqlite3Step(v))==SQLITE_SCHEMA
+ && cnt++ < SQLITE_MAX_SCHEMA_RETRY ){
+ int savedPc = v->pc;
+ rc2 = rc = sqlite3Reprepare(v);
+ if( rc!=SQLITE_OK) break;
+ sqlite3_reset(pStmt);
+ if( savedPc>=0 ) v->doingRerun = 1;
+ assert( v->expired==0 );
+ }
+ if( rc2!=SQLITE_OK ){
+    /* This case occurs after failing to recompile an SQL statement.
+ ** The error message from the SQL compiler has already been loaded
+ ** into the database handle. This block copies the error message
+ ** from the database handle into the statement and sets the statement
+ ** program counter to 0 to ensure that when the statement is
+ ** finalized or reset the parser error message is available via
+ ** sqlite3_errmsg() and sqlite3_errcode().
+ */
+ const char *zErr = (const char *)sqlite3_value_text(db->pErr);
+ sqlite3DbFree(db, v->zErrMsg);
+ if( !db->mallocFailed ){
+ v->zErrMsg = sqlite3DbStrDup(db, zErr);
+ v->rc = rc2;
+ } else {
+ v->zErrMsg = 0;
+ v->rc = rc = SQLITE_NOMEM_BKPT;
+ }
+ }
+ rc = sqlite3ApiExit(db, rc);
+ sqlite3_mutex_leave(db->mutex);
+ return rc;
+}
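A minimal usage sketch of the calling pattern that sqlite3_step() serves
(illustrative only, not part of the imported file); the "users" table and
its columns are hypothetical:

#include <stdio.h>
#include <sqlite3.h>

/* Prepare a query, step through its rows, and finalize it.  Schema-change
** reprepares are handled inside sqlite3_step() itself; any error, including
** SQLITE_BUSY, surfaces through the return codes checked below. */
static int print_users(sqlite3 *db){
  sqlite3_stmt *pStmt = 0;
  int rc = sqlite3_prepare_v2(db, "SELECT id, name FROM users", -1, &pStmt, 0);
  if( rc!=SQLITE_OK ) return rc;
  while( (rc = sqlite3_step(pStmt))==SQLITE_ROW ){
    const char *zName = (const char*)sqlite3_column_text(pStmt, 1);
    printf("%lld %s\n", (long long)sqlite3_column_int64(pStmt, 0),
           zName ? zName : "NULL");
  }
  sqlite3_finalize(pStmt);              /* also reports any deferred error */
  return rc==SQLITE_DONE ? SQLITE_OK : rc;
}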
+
+
+/*
+** Extract the user data from a sqlite3_context structure and return a
+** pointer to it.
+*/
+SQLITE_API void *sqlite3_user_data(sqlite3_context *p){
+ assert( p && p->pFunc );
+ return p->pFunc->pUserData;
+}
+
+/*
+** Extract the database connection handle from a sqlite3_context structure
+** and return a pointer to it.
+**
+** IMPLEMENTATION-OF: R-46798-50301 The sqlite3_context_db_handle() interface
+** returns a copy of the pointer to the database connection (the 1st
+** parameter) of the sqlite3_create_function() and
+** sqlite3_create_function16() routines that originally registered the
+** application defined function.
+*/
+SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context *p){
+ assert( p && p->pOut );
+ return p->pOut->db;
+}
+
+/*
+** Return the current time for a statement. If the current time
+** is requested more than once within the same run of a single prepared
+** statement, the exact same time is returned for each invocation regardless
+** of the amount of time that elapses between invocations. In other words,
+** the time returned is always the time of the first call.
+*/
+SQLITE_PRIVATE sqlite3_int64 sqlite3StmtCurrentTime(sqlite3_context *p){
+ int rc;
+#ifndef SQLITE_ENABLE_STAT3_OR_STAT4
+ sqlite3_int64 *piTime = &p->pVdbe->iCurrentTime;
+ assert( p->pVdbe!=0 );
+#else
+ sqlite3_int64 iTime = 0;
+ sqlite3_int64 *piTime = p->pVdbe!=0 ? &p->pVdbe->iCurrentTime : &iTime;
+#endif
+ if( *piTime==0 ){
+ rc = sqlite3OsCurrentTimeInt64(p->pOut->db->pVfs, piTime);
+ if( rc ) *piTime = 0;
+ }
+ return *piTime;
+}
+
+/*
+** The following is the implementation of an SQL function that always
+** fails with an error message stating that the function is used in the
+** wrong context. The sqlite3_overload_function() API might construct
+** SQL functions that use this routine so that the functions will exist
+** for name resolution but are actually overloaded by the xFindFunction
+** method of virtual tables.
+*/
+SQLITE_PRIVATE void sqlite3InvalidFunction(
+ sqlite3_context *context, /* The function calling context */
+ int NotUsed, /* Number of arguments to the function */
+ sqlite3_value **NotUsed2 /* Value of each argument */
+){
+ const char *zName = context->pFunc->zName;
+ char *zErr;
+ UNUSED_PARAMETER2(NotUsed, NotUsed2);
+ zErr = sqlite3_mprintf(
+ "unable to use function %s in the requested context", zName);
+ sqlite3_result_error(context, zErr, -1);
+ sqlite3_free(zErr);
+}
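A sketch of the sqlite3_overload_function() usage the comment above alludes
to (illustrative only); a virtual-table module would typically make a call
like this from its xCreate/xConnect method, and the 2-argument "match"
function is just an example:

#include <sqlite3.h>

/* Make sure a 2-argument "match" function exists for name resolution.  If
** no real implementation has been registered, SQLite supplies one built on
** sqlite3InvalidFunction(), which raises the "unable to use function ...
** in the requested context" error unless a virtual table overloads the
** name through its xFindFunction method. */
static int declare_match_overload(sqlite3 *db){
  return sqlite3_overload_function(db, "match", 2);
}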
+
+/*
+** Create a new aggregate context for p and return a pointer to
+** its pMem->z element.
+*/
+static SQLITE_NOINLINE void *createAggContext(sqlite3_context *p, int nByte){
+ Mem *pMem = p->pMem;
+ assert( (pMem->flags & MEM_Agg)==0 );
+ if( nByte<=0 ){
+ sqlite3VdbeMemSetNull(pMem);
+ pMem->z = 0;
+ }else{
+ sqlite3VdbeMemClearAndResize(pMem, nByte);
+ pMem->flags = MEM_Agg;
+ pMem->u.pDef = p->pFunc;
+ if( pMem->z ){
+ memset(pMem->z, 0, nByte);
+ }
+ }
+ return (void*)pMem->z;
+}
+
+/*
+** Allocate or return the aggregate context for a user function. A new
+** context is allocated on the first call. Subsequent calls return the
+** same context that was returned on prior calls.
+*/
+SQLITE_API void *sqlite3_aggregate_context(sqlite3_context *p, int nByte){
+ assert( p && p->pFunc && p->pFunc->xFinalize );
+ assert( sqlite3_mutex_held(p->pOut->db->mutex) );
+ testcase( nByte<0 );
+ if( (p->pMem->flags & MEM_Agg)==0 ){
+ return createAggContext(p, nByte);
+ }else{
+ return (void*)p->pMem->z;
+ }
+}
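A sketch of how an application-defined aggregate is expected to use
sqlite3_aggregate_context() (illustrative only); the sum-of-squares
aggregate and its name are hypothetical:

#include <sqlite3.h>

typedef struct SumSqCtx { double sum; } SumSqCtx;

/* xStep: the first call allocates and zeroes the context; later calls for
** the same aggregate invocation receive the same pointer. */
static void sumsqStep(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  SumSqCtx *p = (SumSqCtx*)sqlite3_aggregate_context(ctx, sizeof(*p));
  if( p==0 ) return;                          /* out of memory */
  if( argc>0 && sqlite3_value_type(argv[0])!=SQLITE_NULL ){
    double v = sqlite3_value_double(argv[0]);
    p->sum += v*v;
  }
}

/* xFinal: ask for zero bytes so nothing is allocated if xStep never ran. */
static void sumsqFinal(sqlite3_context *ctx){
  SumSqCtx *p = (SumSqCtx*)sqlite3_aggregate_context(ctx, 0);
  sqlite3_result_double(ctx, p ? p->sum : 0.0);
}

/* After registration, "SELECT sumsq(x) FROM t" becomes available. */
static int register_sumsq(sqlite3 *db){
  return sqlite3_create_function(db, "sumsq", 1, SQLITE_UTF8, 0,
                                 0, sumsqStep, sumsqFinal);
}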
+
+/*
+** Return the auxiliary data pointer, if any, for the iArg'th argument to
+** the user-function defined by pCtx.
+*/
+SQLITE_API void *sqlite3_get_auxdata(sqlite3_context *pCtx, int iArg){
+ AuxData *pAuxData;
+
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
+ if( pCtx->pVdbe==0 ) return 0;
+#else
+ assert( pCtx->pVdbe!=0 );
+#endif
+ for(pAuxData=pCtx->pVdbe->pAuxData; pAuxData; pAuxData=pAuxData->pNext){
+ if( pAuxData->iOp==pCtx->iOp && pAuxData->iArg==iArg ) break;
+ }
+
+ return (pAuxData ? pAuxData->pAux : 0);
+}
+
+/*
+** Set the auxiliary data pointer and delete function, for the iArg'th
+** argument to the user-function defined by pCtx. Any previous value is
+** deleted by calling the delete function specified when it was set.
+*/
+SQLITE_API void sqlite3_set_auxdata(
+ sqlite3_context *pCtx,
+ int iArg,
+ void *pAux,
+ void (*xDelete)(void*)
+){
+ AuxData *pAuxData;
+ Vdbe *pVdbe = pCtx->pVdbe;
+
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ if( iArg<0 ) goto failed;
+#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
+ if( pVdbe==0 ) goto failed;
+#else
+ assert( pVdbe!=0 );
+#endif
+
+ for(pAuxData=pVdbe->pAuxData; pAuxData; pAuxData=pAuxData->pNext){
+ if( pAuxData->iOp==pCtx->iOp && pAuxData->iArg==iArg ) break;
+ }
+ if( pAuxData==0 ){
+ pAuxData = sqlite3DbMallocZero(pVdbe->db, sizeof(AuxData));
+ if( !pAuxData ) goto failed;
+ pAuxData->iOp = pCtx->iOp;
+ pAuxData->iArg = iArg;
+ pAuxData->pNext = pVdbe->pAuxData;
+ pVdbe->pAuxData = pAuxData;
+ if( pCtx->fErrorOrAux==0 ){
+ pCtx->isError = 0;
+ pCtx->fErrorOrAux = 1;
+ }
+ }else if( pAuxData->xDelete ){
+ pAuxData->xDelete(pAuxData->pAux);
+ }
+
+ pAuxData->pAux = pAux;
+ pAuxData->xDelete = xDelete;
+ return;
+
+failed:
+ if( xDelete ){
+ xDelete(pAux);
+ }
+}
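A sketch of the caching pattern that sqlite3_get_auxdata() and
sqlite3_set_auxdata() are designed for (illustrative only); the "compiled"
pattern is faked with a plain string copy and the function name "contains"
is hypothetical:

#include <string.h>
#include <sqlite3.h>

/* contains(text, pattern): the compiled form of the pattern (here just a
** private copy of its text) is cached against argument 1, so it is reused
** across rows whenever the pattern is a constant or an unchanged binding. */
static void containsFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  const char *zText = (const char*)sqlite3_value_text(argv[0]);
  char *zPat = (char*)sqlite3_get_auxdata(ctx, 1);
  (void)argc;
  if( zPat==0 ){
    const char *z = (const char*)sqlite3_value_text(argv[1]);
    if( z==0 ){ sqlite3_result_null(ctx); return; }
    zPat = sqlite3_mprintf("%s", z);
    if( zPat==0 ){ sqlite3_result_error_nomem(ctx); return; }
    /* sqlite3_free() runs when the binding changes or the statement ends. */
    sqlite3_set_auxdata(ctx, 1, zPat, sqlite3_free);
    zPat = (char*)sqlite3_get_auxdata(ctx, 1);
    if( zPat==0 ){ sqlite3_result_error_nomem(ctx); return; }
  }
  sqlite3_result_int(ctx, zText!=0 && strstr(zText, zPat)!=0);
}

Such a function would typically be registered with
sqlite3_create_function(db, "contains", 2, SQLITE_UTF8, 0, containsFunc, 0, 0).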
+
+#ifndef SQLITE_OMIT_DEPRECATED
+/*
+** Return the number of times the Step function of an aggregate has been
+** called.
+**
+** This function is deprecated. Do not use it for new code. It is
+** provided only to avoid breaking legacy code. New aggregate function
+** implementations should keep their own counts within their aggregate
+** context.
+*/
+SQLITE_API int sqlite3_aggregate_count(sqlite3_context *p){
+ assert( p && p->pMem && p->pFunc && p->pFunc->xFinalize );
+ return p->pMem->n;
+}
+#endif
+
+/*
+** Return the number of columns in the result set for the statement pStmt.
+*/
+SQLITE_API int sqlite3_column_count(sqlite3_stmt *pStmt){
+ Vdbe *pVm = (Vdbe *)pStmt;
+ return pVm ? pVm->nResColumn : 0;
+}
+
+/*
+** Return the number of values available from the current row of the
+** currently executing statement pStmt.
+*/
+SQLITE_API int sqlite3_data_count(sqlite3_stmt *pStmt){
+ Vdbe *pVm = (Vdbe *)pStmt;
+ if( pVm==0 || pVm->pResultSet==0 ) return 0;
+ return pVm->nResColumn;
+}
+
+/*
+** Return a pointer to static memory containing an SQL NULL value.
+*/
+static const Mem *columnNullValue(void){
+ /* Even though the Mem structure contains an element
+ ** of type i64, on certain architectures (x86) with certain compiler
+ ** switches (-Os), gcc may align this Mem object on a 4-byte boundary
+ ** instead of an 8-byte one. This all works fine, except that when
+ ** running with SQLITE_DEBUG defined the SQLite code sometimes assert()s
+ ** that a Mem structure is located on an 8-byte boundary. To prevent
+ ** these assert()s from failing, when building with SQLITE_DEBUG defined
+ ** using gcc, we force nullMem to be 8-byte aligned using the magical
+ ** __attribute__((aligned(8))) macro. */
+ static const Mem nullMem
+#if defined(SQLITE_DEBUG) && defined(__GNUC__)
+ __attribute__((aligned(8)))
+#endif
+ = {
+ /* .u = */ {0},
+ /* .flags = */ (u16)MEM_Null,
+ /* .enc = */ (u8)0,
+ /* .eSubtype = */ (u8)0,
+ /* .n = */ (int)0,
+ /* .z = */ (char*)0,
+ /* .zMalloc = */ (char*)0,
+ /* .szMalloc = */ (int)0,
+ /* .uTemp = */ (u32)0,
+ /* .db = */ (sqlite3*)0,
+ /* .xDel = */ (void(*)(void*))0,
+#ifdef SQLITE_DEBUG
+ /* .pScopyFrom = */ (Mem*)0,
+ /* .pFiller = */ (void*)0,
+#endif
+ };
+ return &nullMem;
+}
+
+/*
+** Check to see if column iCol of the given statement is valid. If
+** it is, return a pointer to the Mem for the value of that column.
+** If iCol is not valid, return a pointer to a Mem which has a value
+** of NULL.
+*/
+static Mem *columnMem(sqlite3_stmt *pStmt, int i){
+ Vdbe *pVm;
+ Mem *pOut;
+
+ pVm = (Vdbe *)pStmt;
+ if( pVm==0 ) return (Mem*)columnNullValue();
+ assert( pVm->db );
+ sqlite3_mutex_enter(pVm->db->mutex);
+ if( pVm->pResultSet!=0 && i<pVm->nResColumn && i>=0 ){
+ pOut = &pVm->pResultSet[i];
+ }else{
+ sqlite3Error(pVm->db, SQLITE_RANGE);
+ pOut = (Mem*)columnNullValue();
+ }
+ return pOut;
+}
+
+/*
+** This function is called after invoking an sqlite3_value_XXX function on a
+** column value (i.e. a value returned by evaluating an SQL expression in the
+** select list of a SELECT statement) that may cause a malloc() failure. If
+** malloc() has failed, the connection's mallocFailed flag is cleared and the
+** result code of statement pStmt is set to SQLITE_NOMEM.
+**
+** Specifically, this is called from within:
+**
+** sqlite3_column_int()
+** sqlite3_column_int64()
+** sqlite3_column_text()
+** sqlite3_column_text16()
+** sqlite3_column_real()
+** sqlite3_column_bytes()
+** sqlite3_column_bytes16()
+**     sqlite3_column_blob()
+*/
+static void columnMallocFailure(sqlite3_stmt *pStmt)
+{
+ /* If malloc() failed during an encoding conversion within an
+ ** sqlite3_column_XXX API, then set the return code of the statement to
+ ** SQLITE_NOMEM. The next call to _step() (if any) will return SQLITE_ERROR
+ ** and _finalize() will return NOMEM.
+ */
+ Vdbe *p = (Vdbe *)pStmt;
+ if( p ){
+ assert( p->db!=0 );
+ assert( sqlite3_mutex_held(p->db->mutex) );
+ p->rc = sqlite3ApiExit(p->db, p->rc);
+ sqlite3_mutex_leave(p->db->mutex);
+ }
+}
+
+/**************************** sqlite3_column_ *******************************
+** The following routines are used to access elements of the current row
+** in the result set.
+*/
+SQLITE_API const void *sqlite3_column_blob(sqlite3_stmt *pStmt, int i){
+ const void *val;
+ val = sqlite3_value_blob( columnMem(pStmt,i) );
+ /* Even though there is no encoding conversion, value_blob() might
+ ** need to call malloc() to expand the result of a zeroblob()
+ ** expression.
+ */
+ columnMallocFailure(pStmt);
+ return val;
+}
+SQLITE_API int sqlite3_column_bytes(sqlite3_stmt *pStmt, int i){
+ int val = sqlite3_value_bytes( columnMem(pStmt,i) );
+ columnMallocFailure(pStmt);
+ return val;
+}
+SQLITE_API int sqlite3_column_bytes16(sqlite3_stmt *pStmt, int i){
+ int val = sqlite3_value_bytes16( columnMem(pStmt,i) );
+ columnMallocFailure(pStmt);
+ return val;
+}
+SQLITE_API double sqlite3_column_double(sqlite3_stmt *pStmt, int i){
+ double val = sqlite3_value_double( columnMem(pStmt,i) );
+ columnMallocFailure(pStmt);
+ return val;
+}
+SQLITE_API int sqlite3_column_int(sqlite3_stmt *pStmt, int i){
+ int val = sqlite3_value_int( columnMem(pStmt,i) );
+ columnMallocFailure(pStmt);
+ return val;
+}
+SQLITE_API sqlite_int64 sqlite3_column_int64(sqlite3_stmt *pStmt, int i){
+ sqlite_int64 val = sqlite3_value_int64( columnMem(pStmt,i) );
+ columnMallocFailure(pStmt);
+ return val;
+}
+SQLITE_API const unsigned char *sqlite3_column_text(sqlite3_stmt *pStmt, int i){
+ const unsigned char *val = sqlite3_value_text( columnMem(pStmt,i) );
+ columnMallocFailure(pStmt);
+ return val;
+}
+SQLITE_API sqlite3_value *sqlite3_column_value(sqlite3_stmt *pStmt, int i){
+ Mem *pOut = columnMem(pStmt, i);
+ if( pOut->flags&MEM_Static ){
+ pOut->flags &= ~MEM_Static;
+ pOut->flags |= MEM_Ephem;
+ }
+ columnMallocFailure(pStmt);
+ return (sqlite3_value *)pOut;
+}
+#ifndef SQLITE_OMIT_UTF16
+SQLITE_API const void *sqlite3_column_text16(sqlite3_stmt *pStmt, int i){
+ const void *val = sqlite3_value_text16( columnMem(pStmt,i) );
+ columnMallocFailure(pStmt);
+ return val;
+}
+#endif /* SQLITE_OMIT_UTF16 */
+SQLITE_API int sqlite3_column_type(sqlite3_stmt *pStmt, int i){
+ int iType = sqlite3_value_type( columnMem(pStmt,i) );
+ columnMallocFailure(pStmt);
+ return iType;
+}
+
+/*
+** Convert the N-th element of pStmt->pColName[] into a string using
+** xFunc() then return that string. If N is out of range, return 0.
+**
+** There are up to 5 names for each column. useType determines which
+** name is returned. Here are the names:
+**
+** 0 The column name as it should be displayed for output
+** 1 The datatype name for the column
+** 2 The name of the database that the column derives from
+** 3 The name of the table that the column derives from
+** 4 The name of the table column that the result column derives from
+**
+** If the result is not a simple column reference (if it is an expression
+** or a constant) then useTypes 2, 3, and 4 return NULL.
+*/
+static const void *columnName(
+ sqlite3_stmt *pStmt,
+ int N,
+ const void *(*xFunc)(Mem*),
+ int useType
+){
+ const void *ret;
+ Vdbe *p;
+ int n;
+ sqlite3 *db;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pStmt==0 ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+ ret = 0;
+ p = (Vdbe *)pStmt;
+ db = p->db;
+ assert( db!=0 );
+ n = sqlite3_column_count(pStmt);
+ if( N<n && N>=0 ){
+ N += useType*n;
+ sqlite3_mutex_enter(db->mutex);
+ assert( db->mallocFailed==0 );
+ ret = xFunc(&p->aColName[N]);
+ /* A malloc may have failed inside of the xFunc() call. If this
+ ** is the case, clear the mallocFailed flag and return NULL.
+ */
+ if( db->mallocFailed ){
+ sqlite3OomClear(db);
+ ret = 0;
+ }
+ sqlite3_mutex_leave(db->mutex);
+ }
+ return ret;
+}
+
+/*
+** Return the name of the Nth column of the result set returned by SQL
+** statement pStmt.
+*/
+SQLITE_API const char *sqlite3_column_name(sqlite3_stmt *pStmt, int N){
+ return columnName(
+ pStmt, N, (const void*(*)(Mem*))sqlite3_value_text, COLNAME_NAME);
+}
+#ifndef SQLITE_OMIT_UTF16
+SQLITE_API const void *sqlite3_column_name16(sqlite3_stmt *pStmt, int N){
+ return columnName(
+ pStmt, N, (const void*(*)(Mem*))sqlite3_value_text16, COLNAME_NAME);
+}
+#endif
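A sketch tying together the column-count, column-name, data-count and
column-type accessors above (illustrative only); the output format is
arbitrary:

#include <stdio.h>
#include <sqlite3.h>

/* Print a header line, then the dynamic type of each value in the first
** row of the result set. */
static void describe_first_row(sqlite3_stmt *pStmt){
  static const char *azType[] = {"?","INTEGER","FLOAT","TEXT","BLOB","NULL"};
  int i, nCol = sqlite3_column_count(pStmt);
  for(i=0; i<nCol; i++){
    const char *zName = sqlite3_column_name(pStmt, i);
    printf("%s%s", i ? ", " : "", zName ? zName : "?");
  }
  printf("\n");
  if( sqlite3_step(pStmt)==SQLITE_ROW ){
    for(i=0; i<sqlite3_data_count(pStmt); i++){
      printf("  column %d: %s\n", i, azType[sqlite3_column_type(pStmt, i)]);
    }
  }
}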
+
+/*
+** Constraint: If you have ENABLE_COLUMN_METADATA then you must
+** not define OMIT_DECLTYPE.
+*/
+#if defined(SQLITE_OMIT_DECLTYPE) && defined(SQLITE_ENABLE_COLUMN_METADATA)
+# error "Must not define both SQLITE_OMIT_DECLTYPE \
+ and SQLITE_ENABLE_COLUMN_METADATA"
+#endif
+
+#ifndef SQLITE_OMIT_DECLTYPE
+/*
+** Return the column declaration type (if applicable) of the 'i'th column
+** of the result set of SQL statement pStmt.
+*/
+SQLITE_API const char *sqlite3_column_decltype(sqlite3_stmt *pStmt, int N){
+ return columnName(
+ pStmt, N, (const void*(*)(Mem*))sqlite3_value_text, COLNAME_DECLTYPE);
+}
+#ifndef SQLITE_OMIT_UTF16
+SQLITE_API const void *sqlite3_column_decltype16(sqlite3_stmt *pStmt, int N){
+ return columnName(
+ pStmt, N, (const void*(*)(Mem*))sqlite3_value_text16, COLNAME_DECLTYPE);
+}
+#endif /* SQLITE_OMIT_UTF16 */
+#endif /* SQLITE_OMIT_DECLTYPE */
+
+#ifdef SQLITE_ENABLE_COLUMN_METADATA
+/*
+** Return the name of the database from which a result column derives.
+** NULL is returned if the result column is an expression or constant or
+** anything else which is not an unambiguous reference to a database column.
+*/
+SQLITE_API const char *sqlite3_column_database_name(sqlite3_stmt *pStmt, int N){
+ return columnName(
+ pStmt, N, (const void*(*)(Mem*))sqlite3_value_text, COLNAME_DATABASE);
+}
+#ifndef SQLITE_OMIT_UTF16
+SQLITE_API const void *sqlite3_column_database_name16(sqlite3_stmt *pStmt, int N){
+ return columnName(
+ pStmt, N, (const void*(*)(Mem*))sqlite3_value_text16, COLNAME_DATABASE);
+}
+#endif /* SQLITE_OMIT_UTF16 */
+
+/*
+** Return the name of the table from which a result column derives.
+** NULL is returned if the result column is an expression or constant or
+** anything else which is not an unambiguous reference to a database column.
+*/
+SQLITE_API const char *sqlite3_column_table_name(sqlite3_stmt *pStmt, int N){
+ return columnName(
+ pStmt, N, (const void*(*)(Mem*))sqlite3_value_text, COLNAME_TABLE);
+}
+#ifndef SQLITE_OMIT_UTF16
+SQLITE_API const void *sqlite3_column_table_name16(sqlite3_stmt *pStmt, int N){
+ return columnName(
+ pStmt, N, (const void*(*)(Mem*))sqlite3_value_text16, COLNAME_TABLE);
+}
+#endif /* SQLITE_OMIT_UTF16 */
+
+/*
+** Return the name of the table column from which a result column derives.
+** NULL is returned if the result column is an expression or constant or
+** anything else which is not an unambiguous reference to a database column.
+*/
+SQLITE_API const char *sqlite3_column_origin_name(sqlite3_stmt *pStmt, int N){
+ return columnName(
+ pStmt, N, (const void*(*)(Mem*))sqlite3_value_text, COLNAME_COLUMN);
+}
+#ifndef SQLITE_OMIT_UTF16
+SQLITE_API const void *sqlite3_column_origin_name16(sqlite3_stmt *pStmt, int N){
+ return columnName(
+ pStmt, N, (const void*(*)(Mem*))sqlite3_value_text16, COLNAME_COLUMN);
+}
+#endif /* SQLITE_OMIT_UTF16 */
+#endif /* SQLITE_ENABLE_COLUMN_METADATA */
+
+
+/******************************* sqlite3_bind_ ***************************
+**
+** Routines used to attach values to wildcards in a compiled SQL statement.
+*/
+/*
+** Unbind the value bound to variable i in virtual machine p. This is the
+** same as binding a NULL value to the column. If the "i" parameter is
+** out of range, then SQLITE_RANGE is returned. Otherwise SQLITE_OK.
+**
+** A successful evaluation of this routine acquires the mutex on p.
+** The mutex is released if any kind of error occurs.
+**
+** The error code stored in database p->db is overwritten with the return
+** value in any case.
+*/
+static int vdbeUnbind(Vdbe *p, int i){
+ Mem *pVar;
+ if( vdbeSafetyNotNull(p) ){
+ return SQLITE_MISUSE_BKPT;
+ }
+ sqlite3_mutex_enter(p->db->mutex);
+ if( p->magic!=VDBE_MAGIC_RUN || p->pc>=0 ){
+ sqlite3Error(p->db, SQLITE_MISUSE);
+ sqlite3_mutex_leave(p->db->mutex);
+ sqlite3_log(SQLITE_MISUSE,
+ "bind on a busy prepared statement: [%s]", p->zSql);
+ return SQLITE_MISUSE_BKPT;
+ }
+ if( i<1 || i>p->nVar ){
+ sqlite3Error(p->db, SQLITE_RANGE);
+ sqlite3_mutex_leave(p->db->mutex);
+ return SQLITE_RANGE;
+ }
+ i--;
+ pVar = &p->aVar[i];
+ sqlite3VdbeMemRelease(pVar);
+ pVar->flags = MEM_Null;
+ sqlite3Error(p->db, SQLITE_OK);
+
+ /* If the bit corresponding to this variable in Vdbe.expmask is set, then
+ ** binding a new value to this variable invalidates the current query plan.
+ **
+ ** IMPLEMENTATION-OF: R-48440-37595 If the specific value bound to host
+ ** parameter in the WHERE clause might influence the choice of query plan
+ ** for a statement, then the statement will be automatically recompiled,
+ ** as if there had been a schema change, on the first sqlite3_step() call
+ ** following any change to the bindings of that parameter.
+ */
+ if( p->isPrepareV2 &&
+ ((i<32 && p->expmask & ((u32)1 << i)) || p->expmask==0xffffffff)
+ ){
+ p->expired = 1;
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Bind a text or BLOB value.
+*/
+static int bindText(
+ sqlite3_stmt *pStmt, /* The statement to bind against */
+ int i, /* Index of the parameter to bind */
+ const void *zData, /* Pointer to the data to be bound */
+ int nData, /* Number of bytes of data to be bound */
+ void (*xDel)(void*), /* Destructor for the data */
+ u8 encoding /* Encoding for the data */
+){
+ Vdbe *p = (Vdbe *)pStmt;
+ Mem *pVar;
+ int rc;
+
+ rc = vdbeUnbind(p, i);
+ if( rc==SQLITE_OK ){
+ if( zData!=0 ){
+ pVar = &p->aVar[i-1];
+ rc = sqlite3VdbeMemSetStr(pVar, zData, nData, encoding, xDel);
+ if( rc==SQLITE_OK && encoding!=0 ){
+ rc = sqlite3VdbeChangeEncoding(pVar, ENC(p->db));
+ }
+ sqlite3Error(p->db, rc);
+ rc = sqlite3ApiExit(p->db, rc);
+ }
+ sqlite3_mutex_leave(p->db->mutex);
+ }else if( xDel!=SQLITE_STATIC && xDel!=SQLITE_TRANSIENT ){
+ xDel((void*)zData);
+ }
+ return rc;
+}
+
+
+/*
+** Bind a blob value to an SQL statement variable.
+*/
+SQLITE_API int sqlite3_bind_blob(
+ sqlite3_stmt *pStmt,
+ int i,
+ const void *zData,
+ int nData,
+ void (*xDel)(void*)
+){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( nData<0 ) return SQLITE_MISUSE_BKPT;
+#endif
+ return bindText(pStmt, i, zData, nData, xDel, 0);
+}
+SQLITE_API int sqlite3_bind_blob64(
+ sqlite3_stmt *pStmt,
+ int i,
+ const void *zData,
+ sqlite3_uint64 nData,
+ void (*xDel)(void*)
+){
+ assert( xDel!=SQLITE_DYNAMIC );
+ if( nData>0x7fffffff ){
+ return invokeValueDestructor(zData, xDel, 0);
+ }else{
+ return bindText(pStmt, i, zData, (int)nData, xDel, 0);
+ }
+}
+SQLITE_API int sqlite3_bind_double(sqlite3_stmt *pStmt, int i, double rValue){
+ int rc;
+ Vdbe *p = (Vdbe *)pStmt;
+ rc = vdbeUnbind(p, i);
+ if( rc==SQLITE_OK ){
+ sqlite3VdbeMemSetDouble(&p->aVar[i-1], rValue);
+ sqlite3_mutex_leave(p->db->mutex);
+ }
+ return rc;
+}
+SQLITE_API int sqlite3_bind_int(sqlite3_stmt *p, int i, int iValue){
+ return sqlite3_bind_int64(p, i, (i64)iValue);
+}
+SQLITE_API int sqlite3_bind_int64(sqlite3_stmt *pStmt, int i, sqlite_int64 iValue){
+ int rc;
+ Vdbe *p = (Vdbe *)pStmt;
+ rc = vdbeUnbind(p, i);
+ if( rc==SQLITE_OK ){
+ sqlite3VdbeMemSetInt64(&p->aVar[i-1], iValue);
+ sqlite3_mutex_leave(p->db->mutex);
+ }
+ return rc;
+}
+SQLITE_API int sqlite3_bind_null(sqlite3_stmt *pStmt, int i){
+ int rc;
+ Vdbe *p = (Vdbe*)pStmt;
+ rc = vdbeUnbind(p, i);
+ if( rc==SQLITE_OK ){
+ sqlite3_mutex_leave(p->db->mutex);
+ }
+ return rc;
+}
+SQLITE_API int sqlite3_bind_text(
+ sqlite3_stmt *pStmt,
+ int i,
+ const char *zData,
+ int nData,
+ void (*xDel)(void*)
+){
+ return bindText(pStmt, i, zData, nData, xDel, SQLITE_UTF8);
+}
+SQLITE_API int sqlite3_bind_text64(
+ sqlite3_stmt *pStmt,
+ int i,
+ const char *zData,
+ sqlite3_uint64 nData,
+ void (*xDel)(void*),
+ unsigned char enc
+){
+ assert( xDel!=SQLITE_DYNAMIC );
+ if( nData>0x7fffffff ){
+ return invokeValueDestructor(zData, xDel, 0);
+ }else{
+ if( enc==SQLITE_UTF16 ) enc = SQLITE_UTF16NATIVE;
+ return bindText(pStmt, i, zData, (int)nData, xDel, enc);
+ }
+}
+#ifndef SQLITE_OMIT_UTF16
+SQLITE_API int sqlite3_bind_text16(
+ sqlite3_stmt *pStmt,
+ int i,
+ const void *zData,
+ int nData,
+ void (*xDel)(void*)
+){
+ return bindText(pStmt, i, zData, nData, xDel, SQLITE_UTF16NATIVE);
+}
+#endif /* SQLITE_OMIT_UTF16 */
+SQLITE_API int sqlite3_bind_value(sqlite3_stmt *pStmt, int i, const sqlite3_value *pValue){
+ int rc;
+ switch( sqlite3_value_type((sqlite3_value*)pValue) ){
+ case SQLITE_INTEGER: {
+ rc = sqlite3_bind_int64(pStmt, i, pValue->u.i);
+ break;
+ }
+ case SQLITE_FLOAT: {
+ rc = sqlite3_bind_double(pStmt, i, pValue->u.r);
+ break;
+ }
+ case SQLITE_BLOB: {
+ if( pValue->flags & MEM_Zero ){
+ rc = sqlite3_bind_zeroblob(pStmt, i, pValue->u.nZero);
+ }else{
+ rc = sqlite3_bind_blob(pStmt, i, pValue->z, pValue->n,SQLITE_TRANSIENT);
+ }
+ break;
+ }
+ case SQLITE_TEXT: {
+ rc = bindText(pStmt,i, pValue->z, pValue->n, SQLITE_TRANSIENT,
+ pValue->enc);
+ break;
+ }
+ default: {
+ rc = sqlite3_bind_null(pStmt, i);
+ break;
+ }
+ }
+ return rc;
+}
+SQLITE_API int sqlite3_bind_zeroblob(sqlite3_stmt *pStmt, int i, int n){
+ int rc;
+ Vdbe *p = (Vdbe *)pStmt;
+ rc = vdbeUnbind(p, i);
+ if( rc==SQLITE_OK ){
+ sqlite3VdbeMemSetZeroBlob(&p->aVar[i-1], n);
+ sqlite3_mutex_leave(p->db->mutex);
+ }
+ return rc;
+}
+SQLITE_API int sqlite3_bind_zeroblob64(sqlite3_stmt *pStmt, int i, sqlite3_uint64 n){
+ int rc;
+ Vdbe *p = (Vdbe *)pStmt;
+ sqlite3_mutex_enter(p->db->mutex);
+ if( n>(u64)p->db->aLimit[SQLITE_LIMIT_LENGTH] ){
+ rc = SQLITE_TOOBIG;
+ }else{
+ assert( (n & 0x7FFFFFFF)==n );
+ rc = sqlite3_bind_zeroblob(pStmt, i, n);
+ }
+ rc = sqlite3ApiExit(p->db, rc);
+ sqlite3_mutex_leave(p->db->mutex);
+ return rc;
+}
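A sketch of how the bind family above is typically used, including the
SQLITE_STATIC / SQLITE_TRANSIENT destructor convention (illustrative only;
the "blobs" table is hypothetical):

#include <sqlite3.h>

/* Insert one row, binding a 64-bit integer, a UTF-8 string and a blob.
** SQLITE_TRANSIENT asks SQLite to take its own copy before returning;
** SQLITE_STATIC promises the buffer outlives the statement. */
static int insert_row(sqlite3 *db, sqlite3_int64 id,
                      const char *zName, const void *pData, int nData){
  sqlite3_stmt *pStmt = 0;
  int rc = sqlite3_prepare_v2(db,
      "INSERT INTO blobs(id, name, data) VALUES(?1, ?2, ?3)", -1, &pStmt, 0);
  if( rc!=SQLITE_OK ) return rc;
  sqlite3_bind_int64(pStmt, 1, id);
  sqlite3_bind_text(pStmt, 2, zName, -1, SQLITE_TRANSIENT);
  if( pData ){
    sqlite3_bind_blob(pStmt, 3, pData, nData, SQLITE_STATIC);
  }else{
    sqlite3_bind_zeroblob(pStmt, 3, nData);   /* reserve nData zero bytes */
  }
  rc = sqlite3_step(pStmt);
  sqlite3_finalize(pStmt);
  return rc==SQLITE_DONE ? SQLITE_OK : rc;
}

The zeroblob branch reserves space that could later be written through the
incremental blob I/O interface (sqlite3_blob_open()/sqlite3_blob_write()).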
+
+/*
+** Return the number of wildcards that can be potentially bound to.
+** This routine is added to support DBD::SQLite.
+*/
+SQLITE_API int sqlite3_bind_parameter_count(sqlite3_stmt *pStmt){
+ Vdbe *p = (Vdbe*)pStmt;
+ return p ? p->nVar : 0;
+}
+
+/*
+** Return the name of a wildcard parameter. Return NULL if the index
+** is out of range or if the wildcard is unnamed.
+**
+** The result is always UTF-8.
+*/
+SQLITE_API const char *sqlite3_bind_parameter_name(sqlite3_stmt *pStmt, int i){
+ Vdbe *p = (Vdbe*)pStmt;
+ if( p==0 ) return 0;
+ return sqlite3VListNumToName(p->pVList, i);
+}
+
+/*
+** Given a wildcard parameter name, return the index of the variable
+** with that name. If there is no variable with the given name,
+** return 0.
+*/
+SQLITE_PRIVATE int sqlite3VdbeParameterIndex(Vdbe *p, const char *zName, int nName){
+ if( p==0 || zName==0 ) return 0;
+ return sqlite3VListNameToNum(p->pVList, zName, nName);
+}
+SQLITE_API int sqlite3_bind_parameter_index(sqlite3_stmt *pStmt, const char *zName){
+ return sqlite3VdbeParameterIndex((Vdbe*)pStmt, zName, sqlite3Strlen30(zName));
+}
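A short sketch of resolving a named parameter with
sqlite3_bind_parameter_index(); the ":user" parameter name is hypothetical:

#include <sqlite3.h>

/* Bind by name so the SQL text may order or repeat its parameters freely.
** Note that the prefix character (":", "$" or "@") is part of the name. */
static int bind_user_name(sqlite3_stmt *pStmt, const char *zUser){
  int idx = sqlite3_bind_parameter_index(pStmt, ":user");
  if( idx==0 ) return SQLITE_RANGE;     /* no such parameter in the SQL */
  return sqlite3_bind_text(pStmt, idx, zUser, -1, SQLITE_TRANSIENT);
}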
+
+/*
+** Transfer all bindings from the first statement over to the second.
+*/
+SQLITE_PRIVATE int sqlite3TransferBindings(sqlite3_stmt *pFromStmt, sqlite3_stmt *pToStmt){
+ Vdbe *pFrom = (Vdbe*)pFromStmt;
+ Vdbe *pTo = (Vdbe*)pToStmt;
+ int i;
+ assert( pTo->db==pFrom->db );
+ assert( pTo->nVar==pFrom->nVar );
+ sqlite3_mutex_enter(pTo->db->mutex);
+ for(i=0; i<pFrom->nVar; i++){
+ sqlite3VdbeMemMove(&pTo->aVar[i], &pFrom->aVar[i]);
+ }
+ sqlite3_mutex_leave(pTo->db->mutex);
+ return SQLITE_OK;
+}
+
+#ifndef SQLITE_OMIT_DEPRECATED
+/*
+** Deprecated external interface. Internal/core SQLite code
+** should call sqlite3TransferBindings.
+**
+** It is misuse to call this routine with statements from different
+** database connections. But as this is a deprecated interface, we
+** will not bother to check for that condition.
+**
+** If the two statements contain a different number of bindings, then
+** an SQLITE_ERROR is returned. Nothing else can go wrong, so otherwise
+** SQLITE_OK is returned.
+*/
+SQLITE_API int sqlite3_transfer_bindings(sqlite3_stmt *pFromStmt, sqlite3_stmt *pToStmt){
+ Vdbe *pFrom = (Vdbe*)pFromStmt;
+ Vdbe *pTo = (Vdbe*)pToStmt;
+ if( pFrom->nVar!=pTo->nVar ){
+ return SQLITE_ERROR;
+ }
+ if( pTo->isPrepareV2 && pTo->expmask ){
+ pTo->expired = 1;
+ }
+ if( pFrom->isPrepareV2 && pFrom->expmask ){
+ pFrom->expired = 1;
+ }
+ return sqlite3TransferBindings(pFromStmt, pToStmt);
+}
+#endif
+
+/*
+** Return the sqlite3* database handle to which the prepared statement given
+** in the argument belongs. This is the same database handle that was
+** the first argument to the sqlite3_prepare() that was used to create
+** the statement in the first place.
+*/
+SQLITE_API sqlite3 *sqlite3_db_handle(sqlite3_stmt *pStmt){
+ return pStmt ? ((Vdbe*)pStmt)->db : 0;
+}
+
+/*
+** Return true if the prepared statement is guaranteed to not modify the
+** database.
+*/
+SQLITE_API int sqlite3_stmt_readonly(sqlite3_stmt *pStmt){
+ return pStmt ? ((Vdbe*)pStmt)->readOnly : 1;
+}
+
+/*
+** Return true if the prepared statement is in need of being reset.
+*/
+SQLITE_API int sqlite3_stmt_busy(sqlite3_stmt *pStmt){
+ Vdbe *v = (Vdbe*)pStmt;
+ return v!=0 && v->magic==VDBE_MAGIC_RUN && v->pc>=0;
+}
+
+/*
+** Return a pointer to the next prepared statement after pStmt associated
+** with database connection pDb. If pStmt is NULL, return the first
+** prepared statement for the database connection. Return NULL if there
+** are no more.
+*/
+SQLITE_API sqlite3_stmt *sqlite3_next_stmt(sqlite3 *pDb, sqlite3_stmt *pStmt){
+ sqlite3_stmt *pNext;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(pDb) ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+ sqlite3_mutex_enter(pDb->mutex);
+ if( pStmt==0 ){
+ pNext = (sqlite3_stmt*)pDb->pVdbe;
+ }else{
+ pNext = (sqlite3_stmt*)((Vdbe*)pStmt)->pNext;
+ }
+ sqlite3_mutex_leave(pDb->mutex);
+ return pNext;
+}
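A sketch of the cleanup idiom commonly built on sqlite3_next_stmt():
finalize every outstanding prepared statement so that a later
sqlite3_close() can succeed (illustrative only):

#include <sqlite3.h>

/* Finalize all prepared statements still attached to db.  Always ask for
** the first statement again, because finalizing unlinks it from the list. */
static void finalize_all(sqlite3 *db){
  sqlite3_stmt *pStmt;
  while( (pStmt = sqlite3_next_stmt(db, 0))!=0 ){
    sqlite3_finalize(pStmt);
  }
}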
+
+/*
+** Return the value of a status counter for a prepared statement
+*/
+SQLITE_API int sqlite3_stmt_status(sqlite3_stmt *pStmt, int op, int resetFlag){
+ Vdbe *pVdbe = (Vdbe*)pStmt;
+ u32 v;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !pStmt ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+ v = pVdbe->aCounter[op];
+ if( resetFlag ) pVdbe->aCounter[op] = 0;
+ return (int)v;
+}
+
+/*
+** Return the SQL associated with a prepared statement
+*/
+SQLITE_API const char *sqlite3_sql(sqlite3_stmt *pStmt){
+ Vdbe *p = (Vdbe *)pStmt;
+ return p ? p->zSql : 0;
+}
+
+/*
+** Return the SQL associated with a prepared statement with
+** bound parameters expanded. Space to hold the returned string is
+** obtained from sqlite3_malloc(). The caller is responsible for
+** freeing the returned string by passing it to sqlite3_free().
+**
+** The SQLITE_TRACE_SIZE_LIMIT compile-time option puts an upper bound on
+** the size of each expanded bound parameter.
+*/
+SQLITE_API char *sqlite3_expanded_sql(sqlite3_stmt *pStmt){
+#ifdef SQLITE_OMIT_TRACE
+ return 0;
+#else
+ char *z = 0;
+ const char *zSql = sqlite3_sql(pStmt);
+ if( zSql ){
+ Vdbe *p = (Vdbe *)pStmt;
+ sqlite3_mutex_enter(p->db->mutex);
+ z = sqlite3VdbeExpandSql(p, zSql);
+ sqlite3_mutex_leave(p->db->mutex);
+ }
+ return z;
+#endif
+}
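A sketch of the ownership rule stated above: the string returned by
sqlite3_expanded_sql() must be released with sqlite3_free(), while
sqlite3_sql() returns memory owned by the statement (illustrative only):

#include <stdio.h>
#include <sqlite3.h>

static void log_statement(sqlite3_stmt *pStmt){
  char *zExpanded = sqlite3_expanded_sql(pStmt);   /* caller must free */
  const char *zOriginal = sqlite3_sql(pStmt);      /* owned by pStmt */
  fprintf(stderr, "sql: %s\nexpanded: %s\n",
          zOriginal ? zOriginal : "(null)",
          zExpanded ? zExpanded : "(null)");
  sqlite3_free(zExpanded);
}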
+
+#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
+/*
+** Allocate and populate an UnpackedRecord structure based on the serialized
+** record in nKey/pKey. Return a pointer to the new UnpackedRecord structure
+** if successful, or a NULL pointer if an OOM error is encountered.
+*/
+static UnpackedRecord *vdbeUnpackRecord(
+ KeyInfo *pKeyInfo,
+ int nKey,
+ const void *pKey
+){
+ UnpackedRecord *pRet; /* Return value */
+
+ pRet = sqlite3VdbeAllocUnpackedRecord(pKeyInfo);
+ if( pRet ){
+ memset(pRet->aMem, 0, sizeof(Mem)*(pKeyInfo->nField+1));
+ sqlite3VdbeRecordUnpack(pKeyInfo, nKey, pKey, pRet);
+ }
+ return pRet;
+}
+
+/*
+** This function is called from within a pre-update callback to retrieve
+** a field of the row currently being updated or deleted.
+*/
+SQLITE_API int sqlite3_preupdate_old(sqlite3 *db, int iIdx, sqlite3_value **ppValue){
+ PreUpdate *p = db->pPreUpdate;
+ Mem *pMem;
+ int rc = SQLITE_OK;
+
+ /* Test that this call is being made from within an SQLITE_DELETE or
+ ** SQLITE_UPDATE pre-update callback, and that iIdx is within range. */
+ if( !p || p->op==SQLITE_INSERT ){
+ rc = SQLITE_MISUSE_BKPT;
+ goto preupdate_old_out;
+ }
+ if( p->pPk ){
+ iIdx = sqlite3ColumnOfIndex(p->pPk, iIdx);
+ }
+ if( iIdx>=p->pCsr->nField || iIdx<0 ){
+ rc = SQLITE_RANGE;
+ goto preupdate_old_out;
+ }
+
+ /* If the old.* record has not yet been loaded into memory, do so now. */
+ if( p->pUnpacked==0 ){
+ u32 nRec;
+ u8 *aRec;
+
+ nRec = sqlite3BtreePayloadSize(p->pCsr->uc.pCursor);
+ aRec = sqlite3DbMallocRaw(db, nRec);
+ if( !aRec ) goto preupdate_old_out;
+ rc = sqlite3BtreePayload(p->pCsr->uc.pCursor, 0, nRec, aRec);
+ if( rc==SQLITE_OK ){
+ p->pUnpacked = vdbeUnpackRecord(&p->keyinfo, nRec, aRec);
+ if( !p->pUnpacked ) rc = SQLITE_NOMEM;
+ }
+ if( rc!=SQLITE_OK ){
+ sqlite3DbFree(db, aRec);
+ goto preupdate_old_out;
+ }
+ p->aRecord = aRec;
+ }
+
+ pMem = *ppValue = &p->pUnpacked->aMem[iIdx];
+ if( iIdx==p->pTab->iPKey ){
+ sqlite3VdbeMemSetInt64(pMem, p->iKey1);
+ }else if( iIdx>=p->pUnpacked->nField ){
+ *ppValue = (sqlite3_value *)columnNullValue();
+ }else if( p->pTab->aCol[iIdx].affinity==SQLITE_AFF_REAL ){
+ if( pMem->flags & MEM_Int ){
+ sqlite3VdbeMemRealify(pMem);
+ }
+ }
+
+ preupdate_old_out:
+ sqlite3Error(db, rc);
+ return sqlite3ApiExit(db, rc);
+}
+#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */
+
+#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
+/*
+** This function is called from within a pre-update callback to retrieve
+** the number of columns in the row being updated, deleted or inserted.
+*/
+SQLITE_API int sqlite3_preupdate_count(sqlite3 *db){
+ PreUpdate *p = db->pPreUpdate;
+ return (p ? p->keyinfo.nField : 0);
+}
+#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */
+
+#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
+/*
+** This function is designed to be called from within a pre-update callback
+** only. It returns zero if the change that caused the callback was made
+** immediately by a user SQL statement. Or, if the change was made by a
+** trigger program, it returns the number of trigger programs currently
+** on the stack (1 for a top-level trigger, 2 for a trigger fired by a
+** top-level trigger etc.).
+**
+** For the purposes of the previous paragraph, a foreign key CASCADE, SET NULL
+** or SET DEFAULT action is considered a trigger.
+*/
+SQLITE_API int sqlite3_preupdate_depth(sqlite3 *db){
+ PreUpdate *p = db->pPreUpdate;
+ return (p ? p->v->nFrame : 0);
+}
+#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */
+
+#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
+/*
+** This function is called from within a pre-update callback to retrieve
+** a field of the row currently being updated or inserted.
+*/
+SQLITE_API int sqlite3_preupdate_new(sqlite3 *db, int iIdx, sqlite3_value **ppValue){
+ PreUpdate *p = db->pPreUpdate;
+ int rc = SQLITE_OK;
+ Mem *pMem;
+
+ if( !p || p->op==SQLITE_DELETE ){
+ rc = SQLITE_MISUSE_BKPT;
+ goto preupdate_new_out;
+ }
+ if( p->pPk && p->op!=SQLITE_UPDATE ){
+ iIdx = sqlite3ColumnOfIndex(p->pPk, iIdx);
+ }
+ if( iIdx>=p->pCsr->nField || iIdx<0 ){
+ rc = SQLITE_RANGE;
+ goto preupdate_new_out;
+ }
+
+ if( p->op==SQLITE_INSERT ){
+ /* For an INSERT, memory cell p->iNewReg contains the serialized record
+ ** that is being inserted. Deserialize it. */
+ UnpackedRecord *pUnpack = p->pNewUnpacked;
+ if( !pUnpack ){
+ Mem *pData = &p->v->aMem[p->iNewReg];
+ rc = ExpandBlob(pData);
+ if( rc!=SQLITE_OK ) goto preupdate_new_out;
+ pUnpack = vdbeUnpackRecord(&p->keyinfo, pData->n, pData->z);
+ if( !pUnpack ){
+ rc = SQLITE_NOMEM;
+ goto preupdate_new_out;
+ }
+ p->pNewUnpacked = pUnpack;
+ }
+ pMem = &pUnpack->aMem[iIdx];
+ if( iIdx==p->pTab->iPKey ){
+ sqlite3VdbeMemSetInt64(pMem, p->iKey2);
+ }else if( iIdx>=pUnpack->nField ){
+ pMem = (sqlite3_value *)columnNullValue();
+ }
+ }else{
+ /* For an UPDATE, memory cell (p->iNewReg+1+iIdx) contains the required
+ ** value. Make a copy of the cell contents and return a pointer to it.
+ ** It is not safe to return a pointer to the memory cell itself as the
+ ** caller may modify the value text encoding.
+ */
+ assert( p->op==SQLITE_UPDATE );
+ if( !p->aNew ){
+ p->aNew = (Mem *)sqlite3DbMallocZero(db, sizeof(Mem) * p->pCsr->nField);
+ if( !p->aNew ){
+ rc = SQLITE_NOMEM;
+ goto preupdate_new_out;
+ }
+ }
+ assert( iIdx>=0 && iIdx<p->pCsr->nField );
+ pMem = &p->aNew[iIdx];
+ if( pMem->flags==0 ){
+ if( iIdx==p->pTab->iPKey ){
+ sqlite3VdbeMemSetInt64(pMem, p->iKey2);
+ }else{
+ rc = sqlite3VdbeMemCopy(pMem, &p->v->aMem[p->iNewReg+1+iIdx]);
+ if( rc!=SQLITE_OK ) goto preupdate_new_out;
+ }
+ }
+ }
+ *ppValue = pMem;
+
+ preupdate_new_out:
+ sqlite3Error(db, rc);
+ return sqlite3ApiExit(db, rc);
+}
+#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */
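A sketch of how the pre-update interfaces above fit together (illustrative
only); it assumes a build with SQLITE_ENABLE_PREUPDATE_HOOK, operates on
rowid tables, and the logging format is arbitrary:

#include <stdio.h>
#include <sqlite3.h>

#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
/* Invoked just before each INSERT, UPDATE or DELETE on a rowid table. */
static void preupdateCb(void *pArg, sqlite3 *db, int op,
                        const char *zDb, const char *zTab,
                        sqlite3_int64 iKey1, sqlite3_int64 iKey2){
  int i, nCol = sqlite3_preupdate_count(db);
  (void)pArg; (void)iKey2;
  fprintf(stderr, "%s on %s.%s rowid=%lld depth=%d\n",
          op==SQLITE_INSERT ? "INSERT" : op==SQLITE_DELETE ? "DELETE" : "UPDATE",
          zDb, zTab, (long long)iKey1, sqlite3_preupdate_depth(db));
  for(i=0; i<nCol; i++){
    sqlite3_value *pVal;
    if( op!=SQLITE_INSERT && sqlite3_preupdate_old(db, i, &pVal)==SQLITE_OK ){
      const char *z = (const char*)sqlite3_value_text(pVal);
      fprintf(stderr, "  old[%d]=%s\n", i, z ? z : "NULL");
    }
    if( op!=SQLITE_DELETE && sqlite3_preupdate_new(db, i, &pVal)==SQLITE_OK ){
      const char *z = (const char*)sqlite3_value_text(pVal);
      fprintf(stderr, "  new[%d]=%s\n", i, z ? z : "NULL");
    }
  }
}

static void install_preupdate_logger(sqlite3 *db){
  sqlite3_preupdate_hook(db, preupdateCb, 0);
}
#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */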
+
+#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
+/*
+** Return status data for a single loop within query pStmt.
+*/
+SQLITE_API int sqlite3_stmt_scanstatus(
+ sqlite3_stmt *pStmt, /* Prepared statement being queried */
+ int idx, /* Index of loop to report on */
+ int iScanStatusOp, /* Which metric to return */
+ void *pOut /* OUT: Write the answer here */
+){
+ Vdbe *p = (Vdbe*)pStmt;
+ ScanStatus *pScan;
+ if( idx<0 || idx>=p->nScan ) return 1;
+ pScan = &p->aScan[idx];
+ switch( iScanStatusOp ){
+ case SQLITE_SCANSTAT_NLOOP: {
+ *(sqlite3_int64*)pOut = p->anExec[pScan->addrLoop];
+ break;
+ }
+ case SQLITE_SCANSTAT_NVISIT: {
+ *(sqlite3_int64*)pOut = p->anExec[pScan->addrVisit];
+ break;
+ }
+ case SQLITE_SCANSTAT_EST: {
+ double r = 1.0;
+ LogEst x = pScan->nEst;
+ while( x<100 ){
+ x += 10;
+ r *= 0.5;
+ }
+ *(double*)pOut = r*sqlite3LogEstToInt(x);
+ break;
+ }
+ case SQLITE_SCANSTAT_NAME: {
+ *(const char**)pOut = pScan->zName;
+ break;
+ }
+ case SQLITE_SCANSTAT_EXPLAIN: {
+ if( pScan->addrExplain ){
+ *(const char**)pOut = p->aOp[ pScan->addrExplain ].p4.z;
+ }else{
+ *(const char**)pOut = 0;
+ }
+ break;
+ }
+ case SQLITE_SCANSTAT_SELECTID: {
+ if( pScan->addrExplain ){
+ *(int*)pOut = p->aOp[ pScan->addrExplain ].p1;
+ }else{
+ *(int*)pOut = -1;
+ }
+ break;
+ }
+ default: {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/*
+** Zero all counters associated with the sqlite3_stmt_scanstatus() data.
+*/
+SQLITE_API void sqlite3_stmt_scanstatus_reset(sqlite3_stmt *pStmt){
+ Vdbe *p = (Vdbe*)pStmt;
+ memset(p->anExec, 0, p->nOp * sizeof(i64));
+}
+#endif /* SQLITE_ENABLE_STMT_SCANSTATUS */
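A sketch of walking the per-loop counters exposed by
sqlite3_stmt_scanstatus() (illustrative only); it assumes a build with
SQLITE_ENABLE_STMT_SCANSTATUS and a statement that has already been stepped:

#include <stdio.h>
#include <sqlite3.h>

#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
/* Print one line per loop, then zero the counters for the next run. */
static void dump_scanstatus(sqlite3_stmt *pStmt){
  int idx;
  for(idx=0; ; idx++){
    sqlite3_int64 nLoop = 0, nVisit = 0;
    double rEst = 0.0;
    const char *zName = 0;
    if( sqlite3_stmt_scanstatus(pStmt, idx, SQLITE_SCANSTAT_NLOOP, &nLoop) ) break;
    sqlite3_stmt_scanstatus(pStmt, idx, SQLITE_SCANSTAT_NVISIT, &nVisit);
    sqlite3_stmt_scanstatus(pStmt, idx, SQLITE_SCANSTAT_EST, &rEst);
    sqlite3_stmt_scanstatus(pStmt, idx, SQLITE_SCANSTAT_NAME, &zName);
    printf("loop %d (%s): nLoop=%lld nVisit=%lld est=%g\n",
           idx, zName ? zName : "?", (long long)nLoop, (long long)nVisit, rEst);
  }
  sqlite3_stmt_scanstatus_reset(pStmt);
}
#endif /* SQLITE_ENABLE_STMT_SCANSTATUS */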
+
+/************** End of vdbeapi.c *********************************************/
+/************** Begin file vdbetrace.c ***************************************/
+/*
+** 2009 November 25
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+** This file contains code used to insert the values of host parameters
+** (aka "wildcards") into the SQL text output by sqlite3_trace().
+**
+** The Vdbe parse-tree explainer is also found here.
+*/
+/* #include "sqliteInt.h" */
+/* #include "vdbeInt.h" */
+
+#ifndef SQLITE_OMIT_TRACE
+
+/*
+** zSql is a zero-terminated string of UTF-8 SQL text. Return the number of
+** bytes in this text up to but excluding the first character in
+** a host parameter. If the text contains no host parameters, return
+** the total number of bytes in the text.
+*/
+static int findNextHostParameter(const char *zSql, int *pnToken){
+ int tokenType;
+ int nTotal = 0;
+ int n;
+
+ *pnToken = 0;
+ while( zSql[0] ){
+ n = sqlite3GetToken((u8*)zSql, &tokenType);
+ assert( n>0 && tokenType!=TK_ILLEGAL );
+ if( tokenType==TK_VARIABLE ){
+ *pnToken = n;
+ break;
+ }
+ nTotal += n;
+ zSql += n;
+ }
+ return nTotal;
+}
+
+/*
+** This function returns a pointer to a nul-terminated string in memory
+** obtained from sqlite3DbMalloc(). If sqlite3.nVdbeExec is 1, then the
+** string contains a copy of zRawSql but with host parameters expanded to
+** their current bindings. Or, if sqlite3.nVdbeExec is greater than 1,
+** then the returned string holds a copy of zRawSql with "-- " prepended
+** to each line of text.
+**
+** If the SQLITE_TRACE_SIZE_LIMIT macro is defined to an integer, then
+** long strings and blobs are truncated to that many bytes. This
+** can be used to prevent unreasonably large trace strings when dealing
+** with large (multi-megabyte) strings and blobs.
+**
+** The calling function is responsible for making sure the memory returned
+** is eventually freed.
+**
+** ALGORITHM: Scan the input string looking for host parameters in any of
+** these forms: ?, ?N, $A, @A, :A. Take care to avoid text within
+** string literals, quoted identifier names, and comments. For text forms,
+** the host parameter index is found by scanning the prepared
+** statement for the corresponding OP_Variable opcode. Once the host
+** parameter index is known, locate the value in p->aVar[]. Then render
+** the value as a literal in place of the host parameter name.
+*/
+SQLITE_PRIVATE char *sqlite3VdbeExpandSql(
+ Vdbe *p, /* The prepared statement being evaluated */
+ const char *zRawSql /* Raw text of the SQL statement */
+){
+ sqlite3 *db; /* The database connection */
+ int idx = 0; /* Index of a host parameter */
+ int nextIndex = 1; /* Index of next ? host parameter */
+ int n; /* Length of a token prefix */
+ int nToken; /* Length of the parameter token */
+ int i; /* Loop counter */
+ Mem *pVar; /* Value of a host parameter */
+ StrAccum out; /* Accumulate the output here */
+#ifndef SQLITE_OMIT_UTF16
+ Mem utf8; /* Used to convert UTF16 parameters into UTF8 for display */
+#endif
+ char zBase[100]; /* Initial working space */
+
+ db = p->db;
+ sqlite3StrAccumInit(&out, 0, zBase, sizeof(zBase),
+ db->aLimit[SQLITE_LIMIT_LENGTH]);
+ if( db->nVdbeExec>1 ){
+ while( *zRawSql ){
+ const char *zStart = zRawSql;
+ while( *(zRawSql++)!='\n' && *zRawSql );
+ sqlite3StrAccumAppend(&out, "-- ", 3);
+ assert( (zRawSql - zStart) > 0 );
+ sqlite3StrAccumAppend(&out, zStart, (int)(zRawSql-zStart));
+ }
+ }else if( p->nVar==0 ){
+ sqlite3StrAccumAppend(&out, zRawSql, sqlite3Strlen30(zRawSql));
+ }else{
+ while( zRawSql[0] ){
+ n = findNextHostParameter(zRawSql, &nToken);
+ assert( n>0 );
+ sqlite3StrAccumAppend(&out, zRawSql, n);
+ zRawSql += n;
+ assert( zRawSql[0] || nToken==0 );
+ if( nToken==0 ) break;
+ if( zRawSql[0]=='?' ){
+ if( nToken>1 ){
+ assert( sqlite3Isdigit(zRawSql[1]) );
+ sqlite3GetInt32(&zRawSql[1], &idx);
+ }else{
+ idx = nextIndex;
+ }
+ }else{
+ assert( zRawSql[0]==':' || zRawSql[0]=='$' ||
+ zRawSql[0]=='@' || zRawSql[0]=='#' );
+ testcase( zRawSql[0]==':' );
+ testcase( zRawSql[0]=='$' );
+ testcase( zRawSql[0]=='@' );
+ testcase( zRawSql[0]=='#' );
+ idx = sqlite3VdbeParameterIndex(p, zRawSql, nToken);
+ assert( idx>0 );
+ }
+ zRawSql += nToken;
+ nextIndex = idx + 1;
+ assert( idx>0 && idx<=p->nVar );
+ pVar = &p->aVar[idx-1];
+ if( pVar->flags & MEM_Null ){
+ sqlite3StrAccumAppend(&out, "NULL", 4);
+ }else if( pVar->flags & MEM_Int ){
+ sqlite3XPrintf(&out, "%lld", pVar->u.i);
+ }else if( pVar->flags & MEM_Real ){
+ sqlite3XPrintf(&out, "%!.15g", pVar->u.r);
+ }else if( pVar->flags & MEM_Str ){
+ int nOut; /* Number of bytes of the string text to include in output */
+#ifndef SQLITE_OMIT_UTF16
+ u8 enc = ENC(db);
+ if( enc!=SQLITE_UTF8 ){
+ memset(&utf8, 0, sizeof(utf8));
+ utf8.db = db;
+ sqlite3VdbeMemSetStr(&utf8, pVar->z, pVar->n, enc, SQLITE_STATIC);
+ if( SQLITE_NOMEM==sqlite3VdbeChangeEncoding(&utf8, SQLITE_UTF8) ){
+ out.accError = STRACCUM_NOMEM;
+ out.nAlloc = 0;
+ }
+ pVar = &utf8;
+ }
+#endif
+ nOut = pVar->n;
+#ifdef SQLITE_TRACE_SIZE_LIMIT
+ if( nOut>SQLITE_TRACE_SIZE_LIMIT ){
+ nOut = SQLITE_TRACE_SIZE_LIMIT;
+ while( nOut<pVar->n && (pVar->z[nOut]&0xc0)==0x80 ){ nOut++; }
+ }
+#endif
+ sqlite3XPrintf(&out, "'%.*q'", nOut, pVar->z);
+#ifdef SQLITE_TRACE_SIZE_LIMIT
+ if( nOut<pVar->n ){
+ sqlite3XPrintf(&out, "/*+%d bytes*/", pVar->n-nOut);
+ }
+#endif
+#ifndef SQLITE_OMIT_UTF16
+ if( enc!=SQLITE_UTF8 ) sqlite3VdbeMemRelease(&utf8);
+#endif
+ }else if( pVar->flags & MEM_Zero ){
+ sqlite3XPrintf(&out, "zeroblob(%d)", pVar->u.nZero);
+ }else{
+ int nOut; /* Number of bytes of the blob to include in output */
+ assert( pVar->flags & MEM_Blob );
+ sqlite3StrAccumAppend(&out, "x'", 2);
+ nOut = pVar->n;
+#ifdef SQLITE_TRACE_SIZE_LIMIT
+ if( nOut>SQLITE_TRACE_SIZE_LIMIT ) nOut = SQLITE_TRACE_SIZE_LIMIT;
+#endif
+ for(i=0; i<nOut; i++){
+ sqlite3XPrintf(&out, "%02x", pVar->z[i]&0xff);
+ }
+ sqlite3StrAccumAppend(&out, "'", 1);
+#ifdef SQLITE_TRACE_SIZE_LIMIT
+ if( nOut<pVar->n ){
+ sqlite3XPrintf(&out, "/*+%d bytes*/", pVar->n-nOut);
+ }
+#endif
+ }
+ }
+ }
+ if( out.accError ) sqlite3StrAccumReset(&out);
+ return sqlite3StrAccumFinish(&out);
+}
+
+#endif /* #ifndef SQLITE_OMIT_TRACE */
+
+/************** End of vdbetrace.c *******************************************/
+
+/* Chain include. */
+#include "sqlite3.04.c"