| Index: third_party/sqlite/patches/0016-fts2-Fix-numerous-out-of-bounds-bugs-reading-corrupt.patch
|
| diff --git a/third_party/sqlite/fts2.patch b/third_party/sqlite/patches/0016-fts2-Fix-numerous-out-of-bounds-bugs-reading-corrupt.patch
|
| similarity index 77%
|
| rename from third_party/sqlite/fts2.patch
|
| rename to third_party/sqlite/patches/0016-fts2-Fix-numerous-out-of-bounds-bugs-reading-corrupt.patch
|
| index 0cb1a8829d61967a51587d18f0991a9ac970c976..a51d1dfe77442ceeacaf189bb3c69420a0773a19 100644
|
| --- a/third_party/sqlite/fts2.patch
|
| +++ b/third_party/sqlite/patches/0016-fts2-Fix-numerous-out-of-bounds-bugs-reading-corrupt.patch
|
| @@ -1,58 +1,23 @@
|
| -diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| ---- ext-orig/fts2/fts2.c 2009-09-04 13:37:41.000000000 -0700
|
| -+++ ext/fts2/fts2.c 2009-09-30 14:48:14.000000000 -0700
|
| -@@ -37,6 +37,20 @@
|
| - ** This is an SQLite module implementing full-text search.
|
| - */
|
| -
|
| -+/* TODO(shess): To make it easier to spot changes without groveling
|
| -+** through changelogs, I've defined GEARS_FTS2_CHANGES to call them
|
| -+** out, and I will document them here. On imports, these changes
|
| -+** should be reviewed to make sure they are still present, or are
|
| -+** dropped as appropriate.
|
| -+**
|
| -+** SQLite core adds the custom function fts2_tokenizer() to be used
|
| -+** for defining new tokenizers. The second parameter is a vtable
|
| -+** pointer encoded as a blob. Obviously this cannot be exposed to
|
| -+** Gears callers for security reasons. It could be suppressed in the
|
| -+** authorizer, but for now I have simply commented the definition out.
|
| -+*/
|
| -+#define GEARS_FTS2_CHANGES 1
|
| -+
|
| - /*
|
| - ** The code in this file is only compiled if:
|
| - **
|
| -@@ -326,8 +326,10 @@
|
| - #include "fts2_hash.h"
|
| - #include "fts2_tokenizer.h"
|
| - #include "sqlite3.h"
|
| --#include "sqlite3ext.h"
|
| --SQLITE_EXTENSION_INIT1
|
| -+#ifndef SQLITE_CORE
|
| -+# include "sqlite3ext.h"
|
| -+ SQLITE_EXTENSION_INIT1
|
| -+#endif
|
| -
|
| -
|
| - /* TODO(shess) MAN, this thing needs some refactoring. At minimum, it
|
| -@@ -335,6 +349,16 @@
|
| - # define TRACE(A)
|
| - #endif
|
| -
|
| -+#if 0
|
| -+/* Useful to set breakpoints. See main.c sqlite3Corrupt(). */
|
| -+static int fts2Corrupt(void){
|
| -+ return SQLITE_CORRUPT;
|
| -+}
|
| -+# define SQLITE_CORRUPT_BKPT fts2Corrupt()
|
| -+#else
|
| -+# define SQLITE_CORRUPT_BKPT SQLITE_CORRUPT
|
| -+#endif
|
| -+
|
| - /* It is not safe to call isspace(), tolower(), or isalnum() on
|
| - ** hi-bit-set characters. This is the same solution used in the
|
| - ** tokenizer.
|
| -@@ -423,30 +447,41 @@
|
| +From 5938a2cdd5c19c9afe646425abe86d5cb75b6d1a Mon Sep 17 00:00:00 2001
|
| +From: Chris Evans <cevans@chromium.org>
|
| +Date: Wed, 30 Sep 2009 23:10:34 +0000
|
| +Subject: [PATCH 16/16] [fts2] Fix numerous out-of-bounds bugs reading corrupt
|
| + database.
|
| +
|
| +Fix numerous bugs in fts2 where a corrupt fts2 database could cause
|
| +out-of-bounds reads and writes.
|
| +
|
| +Original review URL is more descriptive:
|
| +http://codereview.chromium.org/216026
|
| +---
|
| + third_party/sqlite/src/ext/fts2/fts2.c | 751 ++++++++++++++++++++++-----------
|
| + 1 file changed, 514 insertions(+), 237 deletions(-)
|
| +
|
| +diff --git a/third_party/sqlite/src/ext/fts2/fts2.c b/third_party/sqlite/src/ext/fts2/fts2.c
|
| +index a78e3d3..e585a8b 100644
|
| +--- a/third_party/sqlite/src/ext/fts2/fts2.c
|
| ++++ b/third_party/sqlite/src/ext/fts2/fts2.c
|
| +@@ -447,30 +447,41 @@ static int putVarint(char *p, sqlite_int64 v){
|
| /* Read a 64-bit variable-length integer from memory starting at p[0].
|
| * Return the number of bytes read, or 0 on error.
|
| * The value is stored in *v. */
|
| @@ -102,7 +67,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| /*******************************************************************/
|
| /* DataBuffer is used to collect data into a buffer in piecemeal
|
| ** fashion. It implements the usual distinction between amount of
|
| -@@ -615,7 +650,7 @@
|
| +@@ -639,7 +650,7 @@ typedef struct DLReader {
|
|
|
| static int dlrAtEnd(DLReader *pReader){
|
| assert( pReader->nData>=0 );
|
| @@ -111,7 +76,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| }
|
| static sqlite_int64 dlrDocid(DLReader *pReader){
|
| assert( !dlrAtEnd(pReader) );
|
| -@@ -639,7 +674,8 @@
|
| +@@ -663,7 +674,8 @@ static int dlrAllDataBytes(DLReader *pReader){
|
| */
|
| static const char *dlrPosData(DLReader *pReader){
|
| sqlite_int64 iDummy;
|
| @@ -121,7 +86,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| assert( !dlrAtEnd(pReader) );
|
| return pReader->pData+n;
|
| }
|
| -@@ -649,7 +685,7 @@
|
| +@@ -673,7 +685,7 @@ static int dlrPosDataLen(DLReader *pReader){
|
| assert( !dlrAtEnd(pReader) );
|
| return pReader->nElement-n;
|
| }
|
| @@ -130,7 +95,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| assert( !dlrAtEnd(pReader) );
|
|
|
| /* Skip past current doclist element. */
|
| -@@ -658,32 +694,48 @@
|
| +@@ -682,32 +694,48 @@ static void dlrStep(DLReader *pReader){
|
| pReader->nData -= pReader->nElement;
|
|
|
| /* If there is more data, read the next doclist element. */
|
| @@ -192,7 +157,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| assert( pData!=NULL && nData!=0 );
|
| pReader->iType = iType;
|
| pReader->pData = pData;
|
| -@@ -692,10 +744,9 @@
|
| +@@ -716,10 +744,9 @@ static void dlrInit(DLReader *pReader, DocListType iType,
|
| pReader->iDocid = 0;
|
|
|
| /* Load the first element's data. There must be a first element. */
|
| @@ -206,7 +171,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| }
|
|
|
| #ifndef NDEBUG
|
| -@@ -782,9 +833,9 @@
|
| +@@ -806,9 +833,9 @@ static void dlwDestroy(DLWriter *pWriter){
|
| /* TODO(shess) This has become just a helper for docListMerge.
|
| ** Consider a refactor to make this cleaner.
|
| */
|
| @@ -219,7 +184,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| sqlite_int64 iDocid = 0;
|
| char c[VARINT_MAX];
|
| int nFirstOld, nFirstNew; /* Old and new varint len of first docid. */
|
| -@@ -793,7 +844,8 @@
|
| +@@ -817,7 +844,8 @@ static void dlwAppend(DLWriter *pWriter,
|
| #endif
|
|
|
| /* Recode the initial docid as delta from iPrevDocid. */
|
| @@ -229,7 +194,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| assert( nFirstOld<nData || (nFirstOld==nData && pWriter->iType==DL_DOCIDS) );
|
| nFirstNew = putVarint(c, iFirstDocid-pWriter->iPrevDocid);
|
|
|
| -@@ -814,10 +866,11 @@
|
| +@@ -838,10 +866,11 @@ static void dlwAppend(DLWriter *pWriter,
|
| dataBufferAppend(pWriter->b, c, nFirstNew);
|
| }
|
| pWriter->iPrevDocid = iLastDocid;
|
| @@ -244,7 +209,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| }
|
| static void dlwAdd(DLWriter *pWriter, sqlite_int64 iDocid){
|
| char c[VARINT_MAX];
|
| -@@ -878,45 +931,63 @@
|
| +@@ -902,45 +931,63 @@ static int plrEndOffset(PLReader *pReader){
|
| assert( !plrAtEnd(pReader) );
|
| return pReader->iEndOffset;
|
| }
|
| @@ -322,7 +287,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| pReader->pData = dlrPosData(pDLReader);
|
| pReader->nData = dlrPosDataLen(pDLReader);
|
| pReader->iType = pDLReader->iType;
|
| -@@ -924,10 +995,9 @@
|
| +@@ -948,10 +995,9 @@ static void plrInit(PLReader *pReader, DLReader *pDLReader){
|
| pReader->iPosition = 0;
|
| pReader->iStartOffset = 0;
|
| pReader->iEndOffset = 0;
|
| @@ -336,7 +301,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| }
|
|
|
| /*******************************************************************/
|
| -@@ -1113,14 +1183,16 @@
|
| +@@ -1137,14 +1183,16 @@ static void dlcDelete(DLCollector *pCollector){
|
| ** deletion will be trimmed, and will thus not effect a deletion
|
| ** during the merge.
|
| */
|
| @@ -356,7 +321,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| dlwInit(&dlWriter, iOutType, out);
|
|
|
| while( !dlrAtEnd(&dlReader) ){
|
| -@@ -1128,7 +1200,8 @@
|
| +@@ -1152,7 +1200,8 @@ static void docListTrim(DocListType iType, const char *pData, int nData,
|
| PLWriter plWriter;
|
| int match = 0;
|
|
|
| @@ -366,7 +331,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
|
|
| while( !plrAtEnd(&plReader) ){
|
| if( iColumn==-1 || plrColumn(&plReader)==iColumn ){
|
| -@@ -1139,7 +1212,11 @@
|
| +@@ -1163,7 +1212,11 @@ static void docListTrim(DocListType iType, const char *pData, int nData,
|
| plwAdd(&plWriter, plrColumn(&plReader), plrPosition(&plReader),
|
| plrStartOffset(&plReader), plrEndOffset(&plReader));
|
| }
|
| @@ -379,7 +344,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| }
|
| if( match ){
|
| plwTerminate(&plWriter);
|
| -@@ -1147,10 +1224,13 @@
|
| +@@ -1171,10 +1224,13 @@ static void docListTrim(DocListType iType, const char *pData, int nData,
|
| }
|
|
|
| plrDestroy(&plReader);
|
| @@ -394,7 +359,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| }
|
|
|
| /* Used by docListMerge() to keep doclists in the ascending order by
|
| -@@ -1207,19 +1287,20 @@
|
| +@@ -1231,19 +1287,20 @@ static void orderedDLReaderReorder(OrderedDLReader *p, int n){
|
| /* TODO(shess) nReaders must be <= MERGE_COUNT. This should probably
|
| ** be fixed.
|
| */
|
| @@ -418,7 +383,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| }
|
|
|
| assert( nReaders<=MERGE_COUNT );
|
| -@@ -1252,20 +1333,23 @@
|
| +@@ -1276,20 +1333,23 @@ static void docListMerge(DataBuffer *out,
|
| nStart += dlrDocDataBytes(readers[0].pReader);
|
| }else{
|
| if( pStart!=0 ){
|
| @@ -445,7 +410,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| }
|
|
|
| /* Get the readers back into order. */
|
| -@@ -1275,8 +1359,11 @@
|
| +@@ -1299,8 +1359,11 @@ static void docListMerge(DataBuffer *out,
|
| }
|
|
|
| /* Copy over any remaining elements. */
|
| @@ -458,7 +423,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| }
|
|
|
| /* Helper function for posListUnion(). Compares the current position
|
| -@@ -1312,30 +1399,40 @@
|
| +@@ -1336,30 +1399,40 @@ static int posListCmp(PLReader *pLeft, PLReader *pRight){
|
| ** work with any doclist type, though both inputs and the output
|
| ** should be the same type.
|
| */
|
| @@ -506,7 +471,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| }
|
| }
|
|
|
| -@@ -1343,56 +1440,75 @@
|
| +@@ -1367,56 +1440,75 @@ static void posListUnion(DLReader *pLeft, DLReader *pRight, DLWriter *pOut){
|
| plwDestroy(&writer);
|
| plrDestroy(&left);
|
| plrDestroy(&right);
|
| @@ -598,7 +563,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| }
|
|
|
| /* pLeft and pRight are DLReaders positioned to the same docid.
|
| -@@ -1407,35 +1523,47 @@
|
| +@@ -1431,35 +1523,47 @@ static void docListUnion(
|
| ** include the positions from pRight that are one more than a
|
| ** position in pLeft. In other words: pRight.iPos==pLeft.iPos+1.
|
| */
|
| @@ -656,7 +621,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| }
|
| }
|
|
|
| -@@ -1446,6 +1574,7 @@
|
| +@@ -1470,6 +1574,7 @@ static void posListPhraseMerge(DLReader *pLeft, DLReader *pRight,
|
|
|
| plrDestroy(&left);
|
| plrDestroy(&right);
|
| @@ -664,7 +629,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| }
|
|
|
| /* We have two doclists with positions: pLeft and pRight.
|
| -@@ -1457,7 +1586,7 @@
|
| +@@ -1481,7 +1586,7 @@ static void posListPhraseMerge(DLReader *pLeft, DLReader *pRight,
|
| ** iType controls the type of data written to pOut. If iType is
|
| ** DL_POSITIONS, the positions are those from pRight.
|
| */
|
| @@ -673,7 +638,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| const char *pLeft, int nLeft,
|
| const char *pRight, int nRight,
|
| DocListType iType,
|
| -@@ -1465,152 +1594,198 @@
|
| +@@ -1489,152 +1594,198 @@ static void docListPhraseMerge(
|
| ){
|
| DLReader left, right;
|
| DLWriter writer;
|
| @@ -906,16 +871,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| }
|
|
|
| static char *string_dup_n(const char *s, int n){
|
| -@@ -1814,7 +1989,7 @@
|
| - /* SEGDIR_MAX_INDEX */ "select max(idx) from %_segdir where level = ?",
|
| - /* SEGDIR_SET */ "insert into %_segdir values (?, ?, ?, ?, ?, ?)",
|
| - /* SEGDIR_SELECT_LEVEL */
|
| -- "select start_block, leaves_end_block, root from %_segdir "
|
| -+ "select start_block, leaves_end_block, root, idx from %_segdir "
|
| - " where level = ? order by idx",
|
| - /* SEGDIR_SPAN */
|
| - "select min(start_block), max(end_block) from %_segdir "
|
| -@@ -3413,7 +3588,8 @@
|
| +@@ -3437,7 +3588,8 @@ static int fulltextNext(sqlite3_vtab_cursor *pCursor){
|
| return SQLITE_OK;
|
| }
|
| rc = sqlite3_bind_int64(c->pStmt, 1, dlrDocid(&c->reader));
|
| @@ -925,21 +881,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| if( rc!=SQLITE_OK ) return rc;
|
| /* TODO(shess) Handle SQLITE_SCHEMA AND SQLITE_BUSY. */
|
| rc = sqlite3_step(c->pStmt);
|
| -@@ -3421,8 +3597,11 @@
|
| - c->eof = 0;
|
| - return SQLITE_OK;
|
| - }
|
| -- /* an error occurred; abort */
|
| -- return rc==SQLITE_DONE ? SQLITE_ERROR : rc;
|
| -+
|
| -+ /* Corrupt if the index refers to missing document. */
|
| -+ if( rc==SQLITE_DONE ) return SQLITE_CORRUPT_BKPT;
|
| -+
|
| -+ return rc;
|
| - }
|
| - }
|
| -
|
| -@@ -3470,14 +3649,18 @@
|
| +@@ -3497,14 +3649,18 @@ static int docListOfTerm(
|
| return rc;
|
| }
|
| dataBufferInit(&new, 0);
|
| @@ -961,36 +903,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| }
|
|
|
| /* Add a new term pTerm[0..nTerm-1] to the query *q.
|
| -@@ -3544,6 +3727,7 @@
|
| - int firstIndex = pQuery->nTerms;
|
| - int iCol;
|
| - int nTerm = 1;
|
| -+ int iEndLast = -1;
|
| -
|
| - int rc = pModule->xOpen(pTokenizer, pSegment, nSegment, &pCursor);
|
| - if( rc!=SQLITE_OK ) return rc;
|
| -@@ -3568,6 +3752,20 @@
|
| - pQuery->nextIsOr = 1;
|
| - continue;
|
| - }
|
| -+
|
| -+ /*
|
| -+ * The ICU tokenizer considers '*' a break character, so the code below
|
| -+ * sets isPrefix correctly, but since that code doesn't eat the '*', the
|
| -+ * ICU tokenizer returns it as the next token. So eat it here until a
|
| -+ * better solution presents itself.
|
| -+ */
|
| -+ if( pQuery->nTerms>0 && nToken==1 && pSegment[iBegin]=='*' &&
|
| -+ iEndLast==iBegin){
|
| -+ pQuery->pTerms[pQuery->nTerms-1].isPrefix = 1;
|
| -+ continue;
|
| -+ }
|
| -+ iEndLast = iEnd;
|
| -+
|
| - queryAdd(pQuery, pToken, nToken);
|
| - if( !inPhrase && iBegin>0 && pSegment[iBegin-1]=='-' ){
|
| - pQuery->pTerms[pQuery->nTerms-1].isNot = 1;
|
| -@@ -3707,18 +3905,30 @@
|
| +@@ -3749,18 +3905,30 @@ static int fulltextQuery(
|
| return rc;
|
| }
|
| dataBufferInit(&new, 0);
|
| @@ -1023,7 +936,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| left = new;
|
| }
|
| }
|
| -@@ -3738,9 +3948,15 @@
|
| +@@ -3780,9 +3948,15 @@ static int fulltextQuery(
|
| return rc;
|
| }
|
| dataBufferInit(&new, 0);
|
| @@ -1040,7 +953,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| left = new;
|
| }
|
|
|
| -@@ -3834,7 +4050,8 @@
|
| +@@ -3876,7 +4050,8 @@ static int fulltextFilter(
|
| rc = fulltextQuery(v, idxNum-QUERY_FULLTEXT, zQuery, -1, &c->result, &c->q);
|
| if( rc!=SQLITE_OK ) return rc;
|
| if( c->result.nData!=0 ){
|
| @@ -1050,7 +963,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| }
|
| break;
|
| }
|
| -@@ -4335,22 +4552,19 @@
|
| +@@ -4377,22 +4552,19 @@ static void interiorReaderDestroy(InteriorReader *pReader){
|
| SCRAMBLE(pReader);
|
| }
|
|
|
| @@ -1078,7 +991,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| pReader->pData = pData+1+n;
|
| pReader->nData = nData-(1+n);
|
|
|
| -@@ -4361,17 +4575,18 @@
|
| +@@ -4403,17 +4575,18 @@ static void interiorReaderInit(const char *pData, int nData,
|
| if( pReader->nData==0 ){
|
| dataBufferInit(&pReader->term, 0);
|
| }else{
|
| @@ -1100,7 +1013,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| }
|
|
|
| static sqlite_int64 interiorReaderCurrentBlockid(InteriorReader *pReader){
|
| -@@ -4388,7 +4603,7 @@
|
| +@@ -4430,7 +4603,7 @@ static const char *interiorReaderTerm(InteriorReader *pReader){
|
| }
|
|
|
| /* Step forward to the next term in the node. */
|
| @@ -1109,7 +1022,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| assert( !interiorReaderAtEnd(pReader) );
|
|
|
| /* If the last term has been read, signal eof, else construct the
|
| -@@ -4399,18 +4614,26 @@
|
| +@@ -4441,18 +4614,26 @@ static void interiorReaderStep(InteriorReader *pReader){
|
| }else{
|
| int n, nPrefix, nSuffix;
|
|
|
| @@ -1142,7 +1055,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| }
|
|
|
| /* Compare the current term to pTerm[nTerm], returning strcmp-style
|
| -@@ -4782,7 +5005,8 @@
|
| +@@ -4824,7 +5005,8 @@ static int leafWriterStepMerge(fulltext_vtab *v, LeafWriter *pWriter,
|
| n = putVarint(c, nData);
|
| dataBufferAppend(&pWriter->data, c, n);
|
|
|
| @@ -1152,7 +1065,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| ASSERT_VALID_DOCLIST(DL_DEFAULT,
|
| pWriter->data.pData+iDoclistData+n,
|
| pWriter->data.nData-iDoclistData-n, NULL);
|
| -@@ -4892,7 +5116,8 @@
|
| +@@ -4934,7 +5116,8 @@ static int leafWriterStep(fulltext_vtab *v, LeafWriter *pWriter,
|
| int rc;
|
| DLReader reader;
|
|
|
| @@ -1162,7 +1075,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| rc = leafWriterStepMerge(v, pWriter, pTerm, nTerm, &reader, 1);
|
| dlrDestroy(&reader);
|
|
|
| -@@ -4937,38 +5162,40 @@
|
| +@@ -4979,38 +5162,40 @@ static int leafReaderDataBytes(LeafReader *pReader){
|
| static const char *leafReaderData(LeafReader *pReader){
|
| int n, nData;
|
| assert( pReader->term.nData>0 );
|
| @@ -1211,7 +1124,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| pReader->pData += n+nData;
|
| pReader->nData -= n+nData;
|
|
|
| -@@ -4976,15 +5203,23 @@
|
| +@@ -5018,15 +5203,23 @@ static void leafReaderStep(LeafReader *pReader){
|
| /* Construct the new term using a prefix from the old term plus a
|
| ** suffix from the leaf data.
|
| */
|
| @@ -1241,25 +1154,15 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| }
|
|
|
| /* strcmp-style comparison of pReader's current term against pTerm.
|
| -@@ -5077,6 +5312,9 @@
|
| - ** the leaf data was entirely contained in the root), or from the
|
| - ** stream of blocks between iStartBlockid and iEndBlockid, inclusive.
|
| - */
|
| -+/* TODO(shess): Figure out a means of indicating how many leaves are
|
| -+** expected, for purposes of detecting corruption.
|
| -+*/
|
| - static int leavesReaderInit(fulltext_vtab *v,
|
| - int idx,
|
| - sqlite_int64 iStartBlockid,
|
| -@@ -5088,32 +5326,67 @@
|
| +@@ -5133,14 +5326,19 @@ static int leavesReaderInit(fulltext_vtab *v,
|
|
|
| dataBufferInit(&pReader->rootData, 0);
|
| if( iStartBlockid==0 ){
|
| + int rc;
|
| -+ /* Corrupt if this can't be a leaf node. */
|
| -+ if( pRootData==NULL || nRootData<1 || pRootData[0]!='\0' ){
|
| -+ return SQLITE_CORRUPT_BKPT;
|
| -+ }
|
| + /* Corrupt if this can't be a leaf node. */
|
| + if( pRootData==NULL || nRootData<1 || pRootData[0]!='\0' ){
|
| + return SQLITE_CORRUPT_BKPT;
|
| + }
|
| /* Entire leaf level fit in root data. */
|
| dataBufferReplace(&pReader->rootData, pRootData, nRootData);
|
| - leafReaderInit(pReader->rootData.pData, pReader->rootData.nData,
|
| @@ -1273,63 +1176,16 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| }else{
|
| sqlite3_stmt *s;
|
| int rc = sql_get_leaf_statement(v, idx, &s);
|
| - if( rc!=SQLITE_OK ) return rc;
|
| -
|
| - rc = sqlite3_bind_int64(s, 1, iStartBlockid);
|
| -- if( rc!=SQLITE_OK ) return rc;
|
| -+ if( rc!=SQLITE_OK ) goto err;
|
| -
|
| - rc = sqlite3_bind_int64(s, 2, iEndBlockid);
|
| -- if( rc!=SQLITE_OK ) return rc;
|
| -+ if( rc!=SQLITE_OK ) goto err;
|
| -
|
| - rc = sqlite3_step(s);
|
| -+
|
| -+ /* Corrupt if interior node referenced missing leaf node. */
|
| - if( rc==SQLITE_DONE ){
|
| -- pReader->eof = 1;
|
| -- return SQLITE_OK;
|
| -+ rc = SQLITE_CORRUPT_BKPT;
|
| -+ goto err;
|
| -+ }
|
| -+
|
| -+ if( rc!=SQLITE_ROW ) goto err;
|
| -+ rc = SQLITE_OK;
|
| -+
|
| -+ /* Corrupt if leaf data isn't a blob. */
|
| -+ if( sqlite3_column_type(s, 0)!=SQLITE_BLOB ){
|
| -+ rc = SQLITE_CORRUPT_BKPT;
|
| -+ }else{
|
| -+ const char *pLeafData = sqlite3_column_blob(s, 0);
|
| -+ int nLeafData = sqlite3_column_bytes(s, 0);
|
| -+
|
| -+ /* Corrupt if this can't be a leaf node. */
|
| -+ if( pLeafData==NULL || nLeafData<1 || pLeafData[0]!='\0' ){
|
| -+ rc = SQLITE_CORRUPT_BKPT;
|
| -+ }else{
|
| +@@ -5174,7 +5372,7 @@ static int leavesReaderInit(fulltext_vtab *v,
|
| + if( pLeafData==NULL || nLeafData<1 || pLeafData[0]!='\0' ){
|
| + rc = SQLITE_CORRUPT_BKPT;
|
| + }else{
|
| +- leafReaderInit(pLeafData, nLeafData, &pReader->leafReader);
|
| + rc = leafReaderInit(pLeafData, nLeafData, &pReader->leafReader);
|
| -+ }
|
| -+ }
|
| -+
|
| -+ err:
|
| -+ if( rc!=SQLITE_OK ){
|
| -+ if( idx==-1 ){
|
| -+ sqlite3_finalize(s);
|
| -+ }else{
|
| -+ sqlite3_reset(s);
|
| -+ }
|
| -+ return rc;
|
| + }
|
| }
|
| -- if( rc!=SQLITE_ROW ) return rc;
|
|
|
| - pReader->pStmt = s;
|
| -- leafReaderInit(sqlite3_column_blob(pReader->pStmt, 0),
|
| -- sqlite3_column_bytes(pReader->pStmt, 0),
|
| -- &pReader->leafReader);
|
| - }
|
| - return SQLITE_OK;
|
| - }
|
| -@@ -5122,11 +5395,12 @@
|
| +@@ -5197,11 +5395,12 @@ static int leavesReaderInit(fulltext_vtab *v,
|
| ** end of the current leaf, step forward to the next leaf block.
|
| */
|
| static int leavesReaderStep(fulltext_vtab *v, LeavesReader *pReader){
|
| @@ -1344,66 +1200,27 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| if( pReader->rootData.pData ){
|
| pReader->eof = 1;
|
| return SQLITE_OK;
|
| -@@ -5136,10 +5410,25 @@
|
| - pReader->eof = 1;
|
| - return rc==SQLITE_DONE ? SQLITE_OK : rc;
|
| - }
|
| -- leafReaderDestroy(&pReader->leafReader);
|
| -- leafReaderInit(sqlite3_column_blob(pReader->pStmt, 0),
|
| -- sqlite3_column_bytes(pReader->pStmt, 0),
|
| -- &pReader->leafReader);
|
| -+
|
| -+ /* Corrupt if leaf data isn't a blob. */
|
| -+ if( sqlite3_column_type(pReader->pStmt, 0)!=SQLITE_BLOB ){
|
| -+ return SQLITE_CORRUPT_BKPT;
|
| -+ }else{
|
| +@@ -5216,6 +5415,7 @@ static int leavesReaderStep(fulltext_vtab *v, LeavesReader *pReader){
|
| + if( sqlite3_column_type(pReader->pStmt, 0)!=SQLITE_BLOB ){
|
| + return SQLITE_CORRUPT_BKPT;
|
| + }else{
|
| + LeafReader tmp;
|
| -+ const char *pLeafData = sqlite3_column_blob(pReader->pStmt, 0);
|
| -+ int nLeafData = sqlite3_column_bytes(pReader->pStmt, 0);
|
| -+
|
| -+ /* Corrupt if this can't be a leaf node. */
|
| -+ if( pLeafData==NULL || nLeafData<1 || pLeafData[0]!='\0' ){
|
| -+ return SQLITE_CORRUPT_BKPT;
|
| -+ }
|
| -+
|
| + const char *pLeafData = sqlite3_column_blob(pReader->pStmt, 0);
|
| + int nLeafData = sqlite3_column_bytes(pReader->pStmt, 0);
|
| +
|
| +@@ -5224,8 +5424,10 @@ static int leavesReaderStep(fulltext_vtab *v, LeavesReader *pReader){
|
| + return SQLITE_CORRUPT_BKPT;
|
| + }
|
| +
|
| + rc = leafReaderInit(pLeafData, nLeafData, &tmp);
|
| + if( rc!=SQLITE_OK ) return rc;
|
| -+ leafReaderDestroy(&pReader->leafReader);
|
| + leafReaderDestroy(&pReader->leafReader);
|
| +- leafReaderInit(pLeafData, nLeafData, &pReader->leafReader);
|
| + pReader->leafReader = tmp;
|
| -+ }
|
| - }
|
| - return SQLITE_OK;
|
| - }
|
| -@@ -5200,8 +5489,19 @@
|
| - sqlite_int64 iEnd = sqlite3_column_int64(s, 1);
|
| - const char *pRootData = sqlite3_column_blob(s, 2);
|
| - int nRootData = sqlite3_column_bytes(s, 2);
|
| -+ sqlite_int64 iIndex = sqlite3_column_int64(s, 3);
|
| -+
|
| -+ /* Corrupt if we get back different types than we stored. */
|
| -+ /* Also corrupt if the index is not sequential starting at 0. */
|
| -+ if( sqlite3_column_type(s, 0)!=SQLITE_INTEGER ||
|
| -+ sqlite3_column_type(s, 1)!=SQLITE_INTEGER ||
|
| -+ sqlite3_column_type(s, 2)!=SQLITE_BLOB ||
|
| -+ i!=iIndex ||
|
| -+ i>=MERGE_COUNT ){
|
| -+ rc = SQLITE_CORRUPT_BKPT;
|
| -+ break;
|
| -+ }
|
| -
|
| -- assert( i<MERGE_COUNT );
|
| - rc = leavesReaderInit(v, i, iStart, iEnd, pRootData, nRootData,
|
| - &pReaders[i]);
|
| - if( rc!=SQLITE_OK ) break;
|
| -@@ -5212,6 +5512,7 @@
|
| - while( i-->0 ){
|
| - leavesReaderDestroy(&pReaders[i]);
|
| }
|
| -+ sqlite3_reset(s); /* So we don't leave a lock. */
|
| - return rc;
|
| }
|
| -
|
| -@@ -5235,13 +5536,26 @@
|
| + return SQLITE_OK;
|
| +@@ -5334,13 +5536,26 @@ static int leavesReadersMerge(fulltext_vtab *v,
|
| DLReader dlReaders[MERGE_COUNT];
|
| const char *pTerm = leavesReaderTerm(pReaders);
|
| int i, nTerm = leavesReaderTermBytes(pReaders);
|
| @@ -1426,30 +1243,14 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| + if( rc!=SQLITE_OK ) break;
|
| + }
|
| + if( rc!=SQLITE_OK ){
|
| -+ while( i-->0 ){
|
| ++ while( i-->0 ){
|
| + dlrDestroy(&dlReaders[i]);
|
| + }
|
| + return rc;
|
| }
|
|
|
| return leafWriterStepMerge(v, pWriter, pTerm, nTerm, dlReaders, nReaders);
|
| -@@ -5295,10 +5609,14 @@
|
| - memset(&lrs, '\0', sizeof(lrs));
|
| - rc = leavesReadersInit(v, iLevel, lrs, &i);
|
| - if( rc!=SQLITE_OK ) return rc;
|
| -- assert( i==MERGE_COUNT );
|
| -
|
| - leafWriterInit(iLevel+1, idx, &writer);
|
| -
|
| -+ if( i!=MERGE_COUNT ){
|
| -+ rc = SQLITE_CORRUPT_BKPT;
|
| -+ goto err;
|
| -+ }
|
| -+
|
| - /* Since leavesReaderReorder() pushes readers at eof to the end,
|
| - ** when the first reader is empty, all will be empty.
|
| - */
|
| -@@ -5341,12 +5659,14 @@
|
| +@@ -5444,12 +5659,14 @@ static int segmentMerge(fulltext_vtab *v, int iLevel){
|
| }
|
|
|
| /* Accumulate the union of *acc and *pData into *acc. */
|
| @@ -1467,7 +1268,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| }
|
|
|
| /* TODO(shess) It might be interesting to explore different merge
|
| -@@ -5388,8 +5708,13 @@
|
| +@@ -5491,8 +5708,13 @@ static int loadSegmentLeavesInt(fulltext_vtab *v, LeavesReader *pReader,
|
| int c = leafReaderTermCmp(&pReader->leafReader, pTerm, nTerm, isPrefix);
|
| if( c>0 ) break; /* Past any possible matches. */
|
| if( c==0 ){
|
| @@ -1482,7 +1283,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
|
|
| /* Find the first empty buffer. */
|
| for(iBuffer=0; iBuffer<nBuffers; ++iBuffer){
|
| -@@ -5435,11 +5760,13 @@
|
| +@@ -5538,11 +5760,13 @@ static int loadSegmentLeavesInt(fulltext_vtab *v, LeavesReader *pReader,
|
| ** with pData/nData.
|
| */
|
| dataBufferSwap(p, pAcc);
|
| @@ -1498,7 +1299,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
|
|
| /* dataBufferReset() could allow a large doclist to blow up
|
| ** our memory requirements.
|
| -@@ -5464,13 +5791,15 @@
|
| +@@ -5567,13 +5791,15 @@ static int loadSegmentLeavesInt(fulltext_vtab *v, LeavesReader *pReader,
|
| if( out->nData==0 ){
|
| dataBufferSwap(out, &(pBuffers[iBuffer]));
|
| }else{
|
| @@ -1516,7 +1317,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| while( nBuffers-- ){
|
| dataBufferDestroy(&(pBuffers[nBuffers]));
|
| }
|
| -@@ -5529,20 +5858,26 @@
|
| +@@ -5632,20 +5858,26 @@ static int loadSegmentLeaves(fulltext_vtab *v,
|
| ** node. Consider whether breaking symmetry is worthwhile. I suspect
|
| ** it is not worthwhile.
|
| */
|
| @@ -1549,7 +1350,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| }
|
| *piStartChild = interiorReaderCurrentBlockid(&reader);
|
|
|
| -@@ -5552,7 +5887,11 @@
|
| +@@ -5655,7 +5887,11 @@ static void getChildrenContaining(const char *pData, int nData,
|
| */
|
| while( !interiorReaderAtEnd(&reader) ){
|
| if( interiorReaderTermCmp(&reader, pTerm, nTerm, isPrefix)>0 ) break;
|
| @@ -1562,7 +1363,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| }
|
| *piEndChild = interiorReaderCurrentBlockid(&reader);
|
|
|
| -@@ -5561,6 +5900,7 @@
|
| +@@ -5664,6 +5900,7 @@ static void getChildrenContaining(const char *pData, int nData,
|
| /* Children must ascend, and if !prefix, both must be the same. */
|
| assert( *piEndChild>=*piStartChild );
|
| assert( isPrefix || *piStartChild==*piEndChild );
|
| @@ -1570,42 +1371,22 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| }
|
|
|
| /* Read block at iBlockid and pass it with other params to
|
| -@@ -5588,11 +5928,31 @@
|
| - if( rc!=SQLITE_OK ) return rc;
|
| -
|
| - rc = sqlite3_step(s);
|
| -- if( rc==SQLITE_DONE ) return SQLITE_ERROR;
|
| -+ /* Corrupt if interior node references missing child node. */
|
| -+ if( rc==SQLITE_DONE ) return SQLITE_CORRUPT_BKPT;
|
| - if( rc!=SQLITE_ROW ) return rc;
|
| -
|
| -- getChildrenContaining(sqlite3_column_blob(s, 0), sqlite3_column_bytes(s, 0),
|
| -- pTerm, nTerm, isPrefix, piStartChild, piEndChild);
|
| -+ /* Corrupt if child node isn't a blob. */
|
| -+ if( sqlite3_column_type(s, 0)!=SQLITE_BLOB ){
|
| -+ sqlite3_reset(s); /* So we don't leave a lock. */
|
| -+ return SQLITE_CORRUPT_BKPT;
|
| -+ }else{
|
| -+ const char *pData = sqlite3_column_blob(s, 0);
|
| -+ int nData = sqlite3_column_bytes(s, 0);
|
| -+
|
| -+ /* Corrupt if child is not a valid interior node. */
|
| -+ if( pData==NULL || nData<1 || pData[0]=='\0' ){
|
| -+ sqlite3_reset(s); /* So we don't leave a lock. */
|
| -+ return SQLITE_CORRUPT_BKPT;
|
| -+ }
|
| -+
|
| +@@ -5709,8 +5946,12 @@ static int loadAndGetChildrenContaining(
|
| + return SQLITE_CORRUPT_BKPT;
|
| + }
|
| +
|
| +- getChildrenContaining(pData, nData, pTerm, nTerm,
|
| +- isPrefix, piStartChild, piEndChild);
|
| + rc = getChildrenContaining(pData, nData, pTerm, nTerm,
|
| + isPrefix, piStartChild, piEndChild);
|
| + if( rc!=SQLITE_OK ){
|
| + sqlite3_reset(s);
|
| + return rc;
|
| + }
|
| -+ }
|
| + }
|
|
|
| /* We expect only one row. We must execute another sqlite3_step()
|
| - * to complete the iteration; otherwise the table will remain
|
| -@@ -5622,8 +5982,9 @@
|
| +@@ -5741,8 +5982,9 @@ static int loadSegmentInt(fulltext_vtab *v, const char *pData, int nData,
|
| /* Process pData as an interior node, then loop down the tree
|
| ** until we find the set of leaf nodes to scan for the term.
|
| */
|
| @@ -1617,17 +1398,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| while( iStartChild>iLeavesEnd ){
|
| sqlite_int64 iNextStart, iNextEnd;
|
| rc = loadAndGetChildrenContaining(v, iStartChild, pTerm, nTerm, isPrefix,
|
| -@@ -5675,7 +6036,8 @@
|
| - DataBuffer result;
|
| - int rc;
|
| -
|
| -- assert( nData>1 );
|
| -+ /* Corrupt if segment root can't be valid. */
|
| -+ if( pData==NULL || nData<1 ) return SQLITE_CORRUPT_BKPT;
|
| -
|
| - /* This code should never be called with buffered updates. */
|
| - assert( v->nPendingData<0 );
|
| -@@ -5692,16 +6054,21 @@
|
| +@@ -5812,16 +6054,21 @@ static int loadSegment(fulltext_vtab *v, const char *pData, int nData,
|
| DataBuffer merged;
|
| DLReader readers[2];
|
|
|
| @@ -1657,20 +1428,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| dataBufferDestroy(&result);
|
| return rc;
|
| }
|
| -@@ -5729,11 +6096,20 @@
|
| - const char *pData = sqlite3_column_blob(s, 2);
|
| - const int nData = sqlite3_column_bytes(s, 2);
|
| - const sqlite_int64 iLeavesEnd = sqlite3_column_int64(s, 1);
|
| -+
|
| -+ /* Corrupt if we get back different types than we stored. */
|
| -+ if( sqlite3_column_type(s, 1)!=SQLITE_INTEGER ||
|
| -+ sqlite3_column_type(s, 2)!=SQLITE_BLOB ){
|
| -+ rc = SQLITE_CORRUPT_BKPT;
|
| -+ goto err;
|
| -+ }
|
| -+
|
| - rc = loadSegment(v, pData, nData, iLeavesEnd, pTerm, nTerm, isPrefix,
|
| - &doclist);
|
| +@@ -5862,6 +6109,7 @@ static int termSelect(fulltext_vtab *v, int iColumn,
|
| if( rc!=SQLITE_OK ) goto err;
|
| }
|
| if( rc==SQLITE_DONE ){
|
| @@ -1678,7 +1436,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| if( doclist.nData!=0 ){
|
| /* TODO(shess) The old term_select_all() code applied the column
|
| ** restrict as we merged segments, leading to smaller buffers.
|
| -@@ -5741,13 +6117,13 @@
|
| +@@ -5869,10 +6117,9 @@ static int termSelect(fulltext_vtab *v, int iColumn,
|
| ** system is checked in.
|
| */
|
| if( iColumn==v->nColumn) iColumn = -1;
|
| @@ -1691,11 +1449,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| }
|
|
|
| err:
|
| -+ sqlite3_reset(s); /* So we don't leave a lock. */
|
| - dataBufferDestroy(&doclist);
|
| - return rc;
|
| - }
|
| -@@ -6089,6 +6465,7 @@
|
| +@@ -6218,6 +6465,7 @@ static int optimizeInternal(fulltext_vtab *v,
|
| LeafWriter *pWriter){
|
| int i, rc = SQLITE_OK;
|
| DataBuffer doclist, merged, tmp;
|
| @@ -1703,7 +1457,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
|
|
| /* Order the readers. */
|
| i = nReaders;
|
| -@@ -6109,14 +6486,21 @@
|
| +@@ -6238,14 +6486,21 @@ static int optimizeInternal(fulltext_vtab *v,
|
| if( 0!=optLeavesReaderTermCmp(&readers[0], &readers[i]) ) break;
|
| }
|
|
|
| @@ -1729,7 +1483,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| }else{
|
| DLReader dlReaders[MERGE_COUNT];
|
| int iReader, nReaders;
|
| -@@ -6124,9 +6508,10 @@
|
| +@@ -6253,9 +6508,10 @@ static int optimizeInternal(fulltext_vtab *v,
|
| /* Prime the pipeline with the first reader's doclist. After
|
| ** one pass index 0 will reference the accumulated doclist.
|
| */
|
| @@ -1743,7 +1497,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| iReader = 1;
|
|
|
| assert( iReader<i ); /* Must execute the loop at least once. */
|
| -@@ -6134,24 +6519,35 @@
|
| +@@ -6263,24 +6519,35 @@ static int optimizeInternal(fulltext_vtab *v,
|
| /* Merge 16 inputs per pass. */
|
| for( nReaders=1; iReader<i && nReaders<MERGE_COUNT;
|
| iReader++, nReaders++ ){
|
| @@ -1788,7 +1542,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| }
|
|
|
| /* Destroy reader that was left in the pipeline. */
|
| -@@ -6159,8 +6555,9 @@
|
| +@@ -6288,8 +6555,9 @@ static int optimizeInternal(fulltext_vtab *v,
|
|
|
| /* Trim deletions from the doclist. */
|
| dataBufferReset(&merged);
|
| @@ -1800,52 +1554,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| }
|
|
|
| /* Only pass doclists with hits (skip if all hits deleted). */
|
| -@@ -6240,6 +6637,14 @@
|
| - const char *pRootData = sqlite3_column_blob(s, 2);
|
| - int nRootData = sqlite3_column_bytes(s, 2);
|
| -
|
| -+ /* Corrupt if we get back different types than we stored. */
|
| -+ if( sqlite3_column_type(s, 0)!=SQLITE_INTEGER ||
|
| -+ sqlite3_column_type(s, 1)!=SQLITE_INTEGER ||
|
| -+ sqlite3_column_type(s, 2)!=SQLITE_BLOB ){
|
| -+ rc = SQLITE_CORRUPT_BKPT;
|
| -+ break;
|
| -+ }
|
| -+
|
| - assert( i<nReaders );
|
| - rc = leavesReaderInit(v, -1, iStart, iEnd, pRootData, nRootData,
|
| - &readers[i].reader);
|
| -@@ -6253,6 +6658,8 @@
|
| - if( rc==SQLITE_DONE ){
|
| - assert( i==nReaders );
|
| - rc = optimizeInternal(v, readers, nReaders, &writer);
|
| -+ }else{
|
| -+ sqlite3_reset(s); /* So we don't leave a lock. */
|
| - }
|
| -
|
| - while( i-- > 0 ){
|
| -@@ -6316,9 +6723,18 @@
|
| - const sqlite_int64 iEndBlockid = sqlite3_column_int64(s, 1);
|
| - const char *pRootData = sqlite3_column_blob(s, 2);
|
| - const int nRootData = sqlite3_column_bytes(s, 2);
|
| -+ int rc;
|
| - LeavesReader reader;
|
| -- int rc = leavesReaderInit(v, 0, iStartBlockid, iEndBlockid,
|
| -- pRootData, nRootData, &reader);
|
| -+
|
| -+ /* Corrupt if we get back different types than we stored. */
|
| -+ if( sqlite3_column_type(s, 0)!=SQLITE_INTEGER ||
|
| -+ sqlite3_column_type(s, 1)!=SQLITE_INTEGER ||
|
| -+ sqlite3_column_type(s, 2)!=SQLITE_BLOB ){
|
| -+ return SQLITE_CORRUPT_BKPT;
|
| -+ }
|
| -+
|
| -+ rc = leavesReaderInit(v, 0, iStartBlockid, iEndBlockid,
|
| -+ pRootData, nRootData, &reader);
|
| - if( rc!=SQLITE_OK ) return rc;
|
| -
|
| - while( rc==SQLITE_OK && !leavesReaderAtEnd(&reader) ){
|
| -@@ -6480,16 +6896,19 @@
|
| +@@ -6628,16 +6896,19 @@ static void createDoclistResult(sqlite3_context *pContext,
|
| const char *pData, int nData){
|
| DataBuffer dump;
|
| DLReader dlReader;
|
| @@ -1854,7 +1563,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| assert( pData!=NULL && nData>0 );
|
|
|
| + rc = dlrInit(&dlReader, DL_DEFAULT, pData, nData);
|
| -+ if( rc!=SQLITE_OK ) return rc;
|
| ++ if( rc!=SQLITE_OK ) return;
|
| dataBufferInit(&dump, 0);
|
| - dlrInit(&dlReader, DL_DEFAULT, pData, nData);
|
| - for( ; !dlrAtEnd(&dlReader); dlrStep(&dlReader) ){
|
| @@ -1868,7 +1577,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| if( DL_DEFAULT==DL_DOCIDS || plrAtEnd(&plReader) ){
|
| sqlite3_snprintf(sizeof(buf), buf, "[%lld] ", dlrDocid(&dlReader));
|
| dataBufferAppend(&dump, buf, strlen(buf));
|
| -@@ -6500,7 +6919,8 @@
|
| +@@ -6648,7 +6919,8 @@ static void createDoclistResult(sqlite3_context *pContext,
|
| dlrDocid(&dlReader), iColumn);
|
| dataBufferAppend(&dump, buf, strlen(buf));
|
|
|
| @@ -1878,7 +1587,7 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
| if( plrColumn(&plReader)!=iColumn ){
|
| iColumn = plrColumn(&plReader);
|
| sqlite3_snprintf(sizeof(buf), buf, "] %d[", iColumn);
|
| -@@ -6521,6 +6941,7 @@
|
| +@@ -6669,6 +6941,7 @@ static void createDoclistResult(sqlite3_context *pContext,
|
| dataBufferAppend(&dump, buf, strlen(buf));
|
| }
|
| plrDestroy(&plReader);
|
| @@ -1886,65 +1595,17 @@ diff -ru ext-orig/fts2/fts2.c ext/fts2/fts2.c
|
|
|
| assert( dump.nData>0 );
|
| dump.nData--; /* Overwrite trailing space. */
|
| -@@ -6529,6 +6950,10 @@
|
| +@@ -6677,6 +6950,10 @@ static void createDoclistResult(sqlite3_context *pContext,
|
| }
|
| }
|
| dlrDestroy(&dlReader);
|
| + if( rc!=SQLITE_OK ){
|
| + dataBufferDestroy(&dump);
|
| -+ return rc;
|
| ++ return;
|
| + }
|
|
|
| assert( dump.nData>0 );
|
| dump.nData--; /* Overwrite trailing space. */
|
| -@@ -6540,6 +6965,7 @@
|
| - sqlite3_result_text(pContext, dump.pData, dump.nData, sqlite3_free);
|
| - dump.pData = NULL;
|
| - dump.nData = dump.nCapacity = 0;
|
| -+ return SQLITE_OK;
|
| - }
|
| -
|
| - /* Implements dump_doclist() for use in inspecting the fts2 index from
|
| -@@ -6822,7 +7248,11 @@
|
| - ** module with sqlite.
|
| - */
|
| - if( SQLITE_OK==rc
|
| -+#if GEARS_FTS2_CHANGES && !SQLITE_TEST
|
| -+ /* fts2_tokenizer() disabled for security reasons. */
|
| -+#else
|
| - && SQLITE_OK==(rc = sqlite3Fts2InitHashTable(db, pHash, "fts2_tokenizer"))
|
| -+#endif
|
| - && SQLITE_OK==(rc = sqlite3_overload_function(db, "snippet", -1))
|
| - && SQLITE_OK==(rc = sqlite3_overload_function(db, "offsets", -1))
|
| - && SQLITE_OK==(rc = sqlite3_overload_function(db, "optimize", -1))
|
| -diff -ru ext-orig/fts2/fts2_icu.c ext/fts2/fts2_icu.c
|
| ---- ext-orig/fts2/fts2_icu.c 2009-09-03 13:32:06.000000000 -0700
|
| -+++ ext/fts2/fts2_icu.c 2009-09-18 14:39:41.000000000 -0700
|
| -@@ -198,7 +198,7 @@
|
| -
|
| - while( iStart<iEnd ){
|
| - int iWhite = iStart;
|
| -- U8_NEXT(pCsr->aChar, iWhite, pCsr->nChar, c);
|
| -+ U16_NEXT(pCsr->aChar, iWhite, pCsr->nChar, c);
|
| - if( u_isspace(c) ){
|
| - iStart = iWhite;
|
| - }else{
|
| -diff -ru ext-orig/fts2/fts2_tokenizer.c ext/fts2/fts2_tokenizer.c
|
| ---- ext-orig/fts2/fts2_tokenizer.c 2009-09-03 13:32:06.000000000 -0700
|
| -+++ ext/fts2/fts2_tokenizer.c 2009-09-18 14:39:41.000000000 -0700
|
| -@@ -28,11 +28,14 @@
|
| -
|
| - #include "sqlite3.h"
|
| - #include "sqlite3ext.h"
|
| --SQLITE_EXTENSION_INIT1
|
| -+#ifndef SQLITE_CORE
|
| -+ SQLITE_EXTENSION_INIT1
|
| -+#endif
|
| -
|
| - #include "fts2_hash.h"
|
| - #include "fts2_tokenizer.h"
|
| - #include <assert.h>
|
| -+#include <stddef.h>
|
| -
|
| - /*
|
| - ** Implementation of the SQL scalar function for accessing the underlying
|
| +--
|
| +2.2.1
|
| +
|
|
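The recurring pattern in the hunks above is that every reader (dlrInit/dlrStep, plrInit/plrStep, leafReaderInit/leafReaderStep, leavesReaderInit) now validates lengths and column types before dereferencing data pulled from the %_segments and %_segdir tables, and reports SQLITE_CORRUPT (via SQLITE_CORRUPT_BKPT) to its caller instead of asserting or reading past the buffer. As a minimal standalone sketch of that idea -- not code from fts2.c; the names readVarint32Safe, fts2_corrupt_demo_walk, and the DEMO_* codes are invented for illustration -- a bounds-checked varint walk looks like this:

/* Illustrative sketch only, not part of the patch above.  It mirrors the
** hardening pattern the patch applies inside fts2.c: bound every varint
** read by the end of the buffer and return a "corrupt" code instead of
** asserting.  All names here (readVarint32Safe, fts2_corrupt_demo_walk,
** DEMO_*) are hypothetical. */
#include <stdio.h>

#define DEMO_OK      0
#define DEMO_CORRUPT 11   /* stand-in for SQLITE_CORRUPT / SQLITE_CORRUPT_BKPT */

/* Read one varint from p without touching bytes at or past pEnd.
** Returns the number of bytes consumed, or 0 if the data is corrupt. */
static int readVarint32Safe(const unsigned char *p, const unsigned char *pEnd,
                            unsigned int *pValue){
  unsigned int v = 0;
  int i;
  for(i=0; p+i<pEnd && i<5; i++){
    v = (v<<7) | (p[i]&0x7f);
    if( (p[i]&0x80)==0 ){
      *pValue = v;
      return i+1;
    }
  }
  return 0;  /* Truncated or over-long varint: treat as corruption. */
}

/* Walk a buffer of varints, propagating corruption to the caller the way
** the patched readers (dlrStep, plrStep, leafReaderStep, ...) now do. */
static int fts2_corrupt_demo_walk(const unsigned char *pData, int nData){
  const unsigned char *p = pData, *pEnd = pData+nData;
  while( p<pEnd ){
    unsigned int value;
    int n = readVarint32Safe(p, pEnd, &value);
    if( n==0 ) return DEMO_CORRUPT;
    printf("varint: %u\n", value);
    p += n;
  }
  return DEMO_OK;
}

int main(void){
  static const unsigned char good[] = {0x07, 0x81, 0x01};  /* 7, then 129 */
  static const unsigned char bad[]  = {0x07, 0x81};        /* truncated */
  printf("good: rc=%d\n", fts2_corrupt_demo_walk(good, (int)sizeof(good)));
  printf("bad:  rc=%d\n", fts2_corrupt_demo_walk(bad, (int)sizeof(bad)));
  return 0;
}

The same shape appears throughout the patch: compute how many bytes a field claims to occupy, confirm that claim fits inside the buffer actually read from the database, and only then advance the cursor or copy the data.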
|