Index: third_party/sqlite/src/ext/fts3/fts3.c |
diff --git a/third_party/sqlite/src/ext/fts3/fts3.c b/third_party/sqlite/src/ext/fts3/fts3.c |
index 3e87a403ea930d96f33e15fbc179f4268d06113d..9707e034a2e48313291c3168a968b7ef60cd374c 100644 |
--- a/third_party/sqlite/src/ext/fts3/fts3.c |
+++ b/third_party/sqlite/src/ext/fts3/fts3.c |
@@ -23,9 +23,6 @@ |
** SQLite (in which case SQLITE_ENABLE_FTS3 is defined). |
*/ |
-/* TODO(shess) Consider exporting this comment to an HTML file or the |
-** wiki. |
-*/ |
/* The full-text index is stored in a series of b+tree (-like) |
** structures called segments which map terms to doclists. The |
** structures are like b+trees in layout, but are constructed from the |
@@ -48,30 +45,40 @@ |
** 21 bits - BBA |
** and so on. |
** |
-** This is identical to how sqlite encodes varints (see util.c). |
+** This is similar in concept to how sqlite encodes "varints" but |
+** the encoding is not the same. SQLite varints are big-endian |
** and are limited to 9 bytes in length whereas FTS3 varints are |
+** little-endian and can be up to 10 bytes in length (in theory). |
+** |
+** Example encodings: |
+** |
+** 1: 0x01 |
+** 127: 0x7f |
+** 128: 0x81 0x00 |
** |
** |
**** Document lists **** |
** A doclist (document list) holds a docid-sorted list of hits for a |
-** given term. Doclists hold docids, and can optionally associate |
-** token positions and offsets with docids. |
+** given term. Doclists hold docids and associated token positions. |
+** A docid is the unique integer identifier for a single document. |
+** A position is the index of a word within the document. The first |
+** word of the document has a position of 0. |
+** |
+** FTS3 used to optionally store character offsets using a compile-time |
+** option. But that functionality is no longer supported. |
** |
-** A DL_POSITIONS_OFFSETS doclist is stored like this: |
+** A doclist is stored like this: |
** |
** array { |
** varint docid; |
** array { (position list for column 0) |
-** varint position; (delta from previous position plus POS_BASE) |
-** varint startOffset; (delta from previous startOffset) |
-** varint endOffset; (delta from startOffset) |
+** varint position; (2 more than the delta from previous position) |
** } |
** array { |
** varint POS_COLUMN; (marks start of position list for new column) |
** varint column; (index of new column) |
** array { |
-** varint position; (delta from previous position plus POS_BASE) |
-** varint startOffset;(delta from previous startOffset) |
-** varint endOffset; (delta from startOffset) |
+** varint position; (2 more than the delta from previous position) |
** } |
** } |
** varint POS_END; (marks end of positions for this document. |
@@ -79,19 +86,32 @@ |
** |
** Here, array { X } means zero or more occurrences of X, adjacent in |
** memory. A "position" is an index of a token in the token stream |
-** generated by the tokenizer, while an "offset" is a byte offset, |
-** both based at 0. Note that POS_END and POS_COLUMN occur in the |
-** same logical place as the position element, and act as sentinals |
-** ending a position list array. |
-** |
-** A DL_POSITIONS doclist omits the startOffset and endOffset |
-** information. A DL_DOCIDS doclist omits both the position and |
-** offset information, becoming an array of varint-encoded docids. |
-** |
-** On-disk data is stored as type DL_DEFAULT, so we don't serialize |
-** the type. Due to how deletion is implemented in the segmentation |
-** system, on-disk doclists MUST store at least positions. |
-** |
+** generated by the tokenizer. Note that POS_END and POS_COLUMN occur |
+** in the same logical place as the position element, and act as sentinels |
+** ending a position list array. POS_END is 0. POS_COLUMN is 1. |
+** The position numbers are not stored literally but rather as two more |
+** than the difference from the prior position, or just the position plus |
+** 2 for the first position. Example: |
+** |
+** label: A B C D E F G H I J K |
+** value: 123 5 9 1 1 14 35 0 234 72 0 |
+** |
+** The 123 value is the first docid. For column zero in this document |
+** there are two matches at positions 3 and 10 (5-2 and 9-2+3). The 1 |
+** at D signals the start of a new column; the 1 at E indicates that the |
+** new column is column number 1. There are two positions at 12 and 45 |
+** (14-2 and 35-2+12). The 0 at H indicates the end-of-document. The |
+** 234 at I is the next docid. It has one position 70 (72-2) and then |
+** terminates with the 0 at K. |
+** |
+** A "position-list" is the list of positions for multiple columns for |
+** a single docid. A "column-list" is the set of positions for a single |
+** column. Hence, a position-list consists of one or more column-lists, |
+** a document record consists of a docid followed by a position-list and |
+** a doclist consists of one or more document records. |
+** |
+** A bare doclist omits the position information, becoming an |
+** array of varint-encoded docids. |
** |
**** Segment leaf nodes **** |
** Segment leaf nodes store terms and doclists, ordered by term. Leaf |
@@ -279,125 +299,27 @@ |
# define SQLITE_CORE 1 |
#endif |
+#include "fts3Int.h" |
+ |
#include <assert.h> |
#include <stdlib.h> |
+#include <stddef.h> |
#include <stdio.h> |
#include <string.h> |
-#include <ctype.h> |
+#include <stdarg.h> |
#include "fts3.h" |
-#include "fts3_expr.h" |
-#include "fts3_hash.h" |
-#include "fts3_tokenizer.h" |
#ifndef SQLITE_CORE |
# include "sqlite3ext.h" |
SQLITE_EXTENSION_INIT1 |
#endif |
- |
-/* TODO(shess) MAN, this thing needs some refactoring. At minimum, it |
-** would be nice to order the file better, perhaps something along the |
-** lines of: |
-** |
-** - utility functions |
-** - table setup functions |
-** - table update functions |
-** - table query functions |
-** |
-** Put the query functions last because they're likely to reference |
-** typedefs or functions from the table update section. |
-*/ |
- |
-#if 0 |
-# define FTSTRACE(A) printf A; fflush(stdout) |
-#else |
-# define FTSTRACE(A) |
-#endif |
- |
-#if 0 |
-/* Useful to set breakpoints. See main.c sqlite3Corrupt(). */ |
-static int fts3Corrupt(void){ |
- return SQLITE_CORRUPT; |
-} |
-# define SQLITE_CORRUPT_BKPT fts3Corrupt() |
-#else |
-# define SQLITE_CORRUPT_BKPT SQLITE_CORRUPT |
-#endif |
- |
-/* It is not safe to call isspace(), tolower(), or isalnum() on |
-** hi-bit-set characters. This is the same solution used in the |
-** tokenizer. |
-*/ |
-/* TODO(shess) The snippet-generation code should be using the |
-** tokenizer-generated tokens rather than doing its own local |
-** tokenization. |
-*/ |
-/* TODO(shess) Is __isascii() a portable version of (c&0x80)==0? */ |
-static int safe_isspace(char c){ |
- return (c&0x80)==0 ? isspace(c) : 0; |
-} |
-static int safe_tolower(char c){ |
- return (c>='A' && c<='Z') ? (c-'A'+'a') : c; |
-} |
-static int safe_isalnum(char c){ |
- return (c&0x80)==0 ? isalnum(c) : 0; |
-} |
- |
-typedef enum DocListType { |
- DL_DOCIDS, /* docids only */ |
- DL_POSITIONS, /* docids + positions */ |
- DL_POSITIONS_OFFSETS /* docids + positions + offsets */ |
-} DocListType; |
- |
-/* |
-** By default, only positions and not offsets are stored in the doclists. |
-** To change this so that offsets are stored too, compile with |
-** |
-** -DDL_DEFAULT=DL_POSITIONS_OFFSETS |
-** |
-** If DL_DEFAULT is set to DL_DOCIDS, your table can only be inserted |
-** into (no deletes or updates). |
-*/ |
-#ifndef DL_DEFAULT |
-# define DL_DEFAULT DL_POSITIONS |
-#endif |
- |
-enum { |
- POS_END = 0, /* end of this position list */ |
- POS_COLUMN, /* followed by new column number */ |
- POS_BASE |
-}; |
- |
-/* MERGE_COUNT controls how often we merge segments (see comment at |
-** top of file). |
-*/ |
-#define MERGE_COUNT 16 |
- |
-/* utility functions */ |
- |
-/* CLEAR() and SCRAMBLE() abstract memset() on a pointer to a single |
-** record to prevent errors of the form: |
-** |
-** my_function(SomeType *b){ |
-** memset(b, '\0', sizeof(b)); // sizeof(b)!=sizeof(*b) |
-** } |
+/* |
+** Write a 64-bit variable-length integer to memory starting at p[0]. |
+** The length of data written will be between 1 and FTS3_VARINT_MAX bytes. |
+** The number of bytes written is returned. |
*/ |
-/* TODO(shess) Obvious candidates for a header file. */ |
-#define CLEAR(b) memset(b, '\0', sizeof(*(b))) |
- |
-#ifndef NDEBUG |
-# define SCRAMBLE(b) memset(b, 0x55, sizeof(*(b))) |
-#else |
-# define SCRAMBLE(b) |
-#endif |
- |
-/* We may need up to VARINT_MAX bytes to store an encoded 64-bit integer. */ |
-#define VARINT_MAX 10 |
- |
-/* Write a 64-bit variable-length integer to memory starting at p[0]. |
- * The length of data written will be between 1 and VARINT_MAX bytes. |
- * The number of bytes written is returned. */ |
-static int fts3PutVarint(char *p, sqlite_int64 v){ |
+int sqlite3Fts3PutVarint(char *p, sqlite_int64 v){ |
unsigned char *q = (unsigned char *) p; |
sqlite_uint64 vu = v; |
do{ |
@@ -405,6928 +327,3277 @@ static int fts3PutVarint(char *p, sqlite_int64 v){ |
vu >>= 7; |
}while( vu!=0 ); |
q[-1] &= 0x7f; /* turn off high bit in final byte */ |
- assert( q - (unsigned char *)p <= VARINT_MAX ); |
+ assert( q - (unsigned char *)p <= FTS3_VARINT_MAX ); |
return (int) (q - (unsigned char *)p); |
} |
-/* Read a 64-bit variable-length integer from memory starting at p[0]. |
- * Return the number of bytes read, or 0 on error. |
- * The value is stored in *v. */ |
-static int fts3GetVarintSafe(const char *p, sqlite_int64 *v, int max){ |
+/* |
+** Read a 64-bit variable-length integer from memory starting at p[0]. |
+** Return the number of bytes read, or 0 on error. |
+** The value is stored in *v. |
+*/ |
+int sqlite3Fts3GetVarint(const char *p, sqlite_int64 *v){ |
const unsigned char *q = (const unsigned char *) p; |
sqlite_uint64 x = 0, y = 1; |
- if( max>VARINT_MAX ) max = VARINT_MAX; |
- while( max && (*q & 0x80) == 0x80 ){ |
- max--; |
+ while( (*q&0x80)==0x80 && q-(unsigned char *)p<FTS3_VARINT_MAX ){ |
x += y * (*q++ & 0x7f); |
y <<= 7; |
} |
- if( !max ){ |
- assert( 0 ); |
- return 0; /* tried to read too much; bad data */ |
- } |
x += y * (*q++); |
*v = (sqlite_int64) x; |
return (int) (q - (unsigned char *)p); |
} |
-static int fts3GetVarint(const char *p, sqlite_int64 *v){ |
- return fts3GetVarintSafe(p, v, VARINT_MAX); |
-} |
- |
-static int fts3GetVarint32Safe(const char *p, int *pi, int max){ |
+/* |
+** Similar to sqlite3Fts3GetVarint(), except that the output is truncated to a |
+** 32-bit integer before it is returned. |
+*/ |
+int sqlite3Fts3GetVarint32(const char *p, int *pi){ |
sqlite_int64 i; |
- int ret = fts3GetVarintSafe(p, &i, max); |
- if( !ret ) return ret; |
+ int ret = sqlite3Fts3GetVarint(p, &i); |
*pi = (int) i; |
- assert( *pi==i ); |
return ret; |
} |
-static int fts3GetVarint32(const char* p, int *pi){ |
- return fts3GetVarint32Safe(p, pi, VARINT_MAX); |
+/* |
+** Return the number of bytes required to encode v as a varint |
+*/ |
+int sqlite3Fts3VarintLen(sqlite3_uint64 v){ |
+ int i = 0; |
+ do{ |
+ i++; |
+ v >>= 7; |
+ }while( v!=0 ); |
+ return i; |
} |
-/*******************************************************************/ |
-/* DataBuffer is used to collect data into a buffer in piecemeal |
-** fashion. It implements the usual distinction between amount of |
-** data currently stored (nData) and buffer capacity (nCapacity). |
+/* |
+** Convert an SQL-style quoted string into a normal string by removing |
+** the quote characters. The conversion is done in-place. If the |
+** input does not begin with a quote character, then this routine |
+** is a no-op. |
+** |
+** Examples: |
+** |
+** "abc" becomes abc |
+** 'xyz' becomes xyz |
+** [pqr] becomes pqr |
+** `mno` becomes mno |
** |
-** dataBufferInit - create a buffer with given initial capacity. |
-** dataBufferReset - forget buffer's data, retaining capacity. |
-** dataBufferDestroy - free buffer's data. |
-** dataBufferSwap - swap contents of two buffers. |
-** dataBufferExpand - expand capacity without adding data. |
-** dataBufferAppend - append data. |
-** dataBufferAppend2 - append two pieces of data at once. |
-** dataBufferReplace - replace buffer's data. |
*/ |
-typedef struct DataBuffer { |
- char *pData; /* Pointer to malloc'ed buffer. */ |
- int nCapacity; /* Size of pData buffer. */ |
- int nData; /* End of data loaded into pData. */ |
-} DataBuffer; |
- |
-static void dataBufferInit(DataBuffer *pBuffer, int nCapacity){ |
- assert( nCapacity>=0 ); |
- pBuffer->nData = 0; |
- pBuffer->nCapacity = nCapacity; |
- pBuffer->pData = nCapacity==0 ? NULL : sqlite3_malloc(nCapacity); |
-} |
-static void dataBufferReset(DataBuffer *pBuffer){ |
- pBuffer->nData = 0; |
-} |
-static void dataBufferDestroy(DataBuffer *pBuffer){ |
- if( pBuffer->pData!=NULL ) sqlite3_free(pBuffer->pData); |
- SCRAMBLE(pBuffer); |
-} |
-static void dataBufferSwap(DataBuffer *pBuffer1, DataBuffer *pBuffer2){ |
- DataBuffer tmp = *pBuffer1; |
- *pBuffer1 = *pBuffer2; |
- *pBuffer2 = tmp; |
-} |
-static void dataBufferExpand(DataBuffer *pBuffer, int nAddCapacity){ |
- assert( nAddCapacity>0 ); |
- /* TODO(shess) Consider expanding more aggressively. Note that the |
- ** underlying malloc implementation may take care of such things for |
- ** us already. |
- */ |
- if( pBuffer->nData+nAddCapacity>pBuffer->nCapacity ){ |
- pBuffer->nCapacity = pBuffer->nData+nAddCapacity; |
- pBuffer->pData = sqlite3_realloc(pBuffer->pData, pBuffer->nCapacity); |
+void sqlite3Fts3Dequote(char *z){ |
+ char quote; /* Quote character (if any ) */ |
+ |
+ quote = z[0]; |
+ if( quote=='[' || quote=='\'' || quote=='"' || quote=='`' ){ |
+ int iIn = 1; /* Index of next byte to read from input */ |
+ int iOut = 0; /* Index of next byte to write to output */ |
+ |
+ /* If the first byte was a '[', then the close-quote character is a ']' */ |
+ if( quote=='[' ) quote = ']'; |
+ |
+ while( ALWAYS(z[iIn]) ){ |
+ if( z[iIn]==quote ){ |
+ if( z[iIn+1]!=quote ) break; |
+ z[iOut++] = quote; |
+ iIn += 2; |
+ }else{ |
+ z[iOut++] = z[iIn++]; |
+ } |
+ } |
+ z[iOut] = '\0'; |
} |
} |
-static void dataBufferAppend(DataBuffer *pBuffer, |
- const char *pSource, int nSource){ |
- assert( nSource>0 && pSource!=NULL ); |
- dataBufferExpand(pBuffer, nSource); |
- memcpy(pBuffer->pData+pBuffer->nData, pSource, nSource); |
- pBuffer->nData += nSource; |
-} |
-static void dataBufferAppend2(DataBuffer *pBuffer, |
- const char *pSource1, int nSource1, |
- const char *pSource2, int nSource2){ |
- assert( nSource1>0 && pSource1!=NULL ); |
- assert( nSource2>0 && pSource2!=NULL ); |
- dataBufferExpand(pBuffer, nSource1+nSource2); |
- memcpy(pBuffer->pData+pBuffer->nData, pSource1, nSource1); |
- memcpy(pBuffer->pData+pBuffer->nData+nSource1, pSource2, nSource2); |
- pBuffer->nData += nSource1+nSource2; |
-} |
-static void dataBufferReplace(DataBuffer *pBuffer, |
- const char *pSource, int nSource){ |
- dataBufferReset(pBuffer); |
- dataBufferAppend(pBuffer, pSource, nSource); |
-} |
-/* StringBuffer is a null-terminated version of DataBuffer. */ |
-typedef struct StringBuffer { |
- DataBuffer b; /* Includes null terminator. */ |
-} StringBuffer; |
- |
-static void initStringBuffer(StringBuffer *sb){ |
- dataBufferInit(&sb->b, 100); |
- dataBufferReplace(&sb->b, "", 1); |
-} |
-static int stringBufferLength(StringBuffer *sb){ |
- return sb->b.nData-1; |
-} |
-static char *stringBufferData(StringBuffer *sb){ |
- return sb->b.pData; |
-} |
-static void stringBufferDestroy(StringBuffer *sb){ |
- dataBufferDestroy(&sb->b); |
+/* |
+** Read a single varint from the doclist at *pp and advance *pp to point |
+** to the first byte past the end of the varint. Add the value of the varint |
+** to *pVal. |
+*/ |
+static void fts3GetDeltaVarint(char **pp, sqlite3_int64 *pVal){ |
+ sqlite3_int64 iVal; |
+ *pp += sqlite3Fts3GetVarint(*pp, &iVal); |
+ *pVal += iVal; |
} |
-static void nappend(StringBuffer *sb, const char *zFrom, int nFrom){ |
- assert( sb->b.nData>0 ); |
- if( nFrom>0 ){ |
- sb->b.nData--; |
- dataBufferAppend2(&sb->b, zFrom, nFrom, "", 1); |
+/* |
+** As long as *pp has not reached its end (pEnd), then do the same |
+** as fts3GetDeltaVarint(): read a single varint and add it to *pVal. |
+** But if we have reached the end of the doclist, just set *pp=0 and |
+** leave *pVal unchanged. |
+*/ |
+static void fts3GetDeltaVarint2(char **pp, char *pEnd, sqlite3_int64 *pVal){ |
+ if( *pp>=pEnd ){ |
+ *pp = 0; |
+ }else{ |
+ fts3GetDeltaVarint(pp, pVal); |
} |
} |
-static void append(StringBuffer *sb, const char *zFrom){ |
- nappend(sb, zFrom, strlen(zFrom)); |
-} |
-/* Append a list of strings separated by commas. */ |
-static void appendList(StringBuffer *sb, int nString, char **azString){ |
+/* |
+** The xDisconnect() virtual table method. |
+*/ |
+static int fts3DisconnectMethod(sqlite3_vtab *pVtab){ |
+ Fts3Table *p = (Fts3Table *)pVtab; |
int i; |
- for(i=0; i<nString; ++i){ |
- if( i>0 ) append(sb, ", "); |
- append(sb, azString[i]); |
- } |
-} |
- |
-static int endsInWhiteSpace(StringBuffer *p){ |
- return stringBufferLength(p)>0 && |
- safe_isspace(stringBufferData(p)[stringBufferLength(p)-1]); |
-} |
-/* If the StringBuffer ends in something other than white space, add a |
-** single space character to the end. |
-*/ |
-static void appendWhiteSpace(StringBuffer *p){ |
- if( stringBufferLength(p)==0 ) return; |
- if( !endsInWhiteSpace(p) ) append(p, " "); |
-} |
+ assert( p->nPendingData==0 ); |
+ assert( p->pSegments==0 ); |
-/* Remove white space from the end of the StringBuffer */ |
-static void trimWhiteSpace(StringBuffer *p){ |
- while( endsInWhiteSpace(p) ){ |
- p->b.pData[--p->b.nData-1] = '\0'; |
+ /* Free any prepared statements held */ |
+ for(i=0; i<SizeofArray(p->aStmt); i++){ |
+ sqlite3_finalize(p->aStmt[i]); |
} |
+ sqlite3_free(p->zSegmentsTbl); |
+ sqlite3_free(p->zReadExprlist); |
+ sqlite3_free(p->zWriteExprlist); |
+ |
+ /* Invoke the tokenizer destructor to free the tokenizer. */ |
+ p->pTokenizer->pModule->xDestroy(p->pTokenizer); |
+ |
+ sqlite3_free(p); |
+ return SQLITE_OK; |
} |
-/*******************************************************************/ |
-/* DLReader is used to read document elements from a doclist. The |
-** current docid is cached, so dlrDocid() is fast. DLReader does not |
-** own the doclist buffer. |
-** |
-** dlrAtEnd - true if there's no more data to read. |
-** dlrDocid - docid of current document. |
-** dlrDocData - doclist data for current document (including docid). |
-** dlrDocDataBytes - length of same. |
-** dlrAllDataBytes - length of all remaining data. |
-** dlrPosData - position data for current document. |
-** dlrPosDataLen - length of pos data for current document (incl POS_END). |
-** dlrStep - step to current document. |
-** dlrInit - initial for doclist of given type against given data. |
-** dlrDestroy - clean up. |
-** |
-** Expected usage is something like: |
+/* |
+** Construct one or more SQL statements from the format string given |
+** and then evaluate those statements. The success code is written |
+** into *pRc. |
** |
-** DLReader reader; |
-** dlrInit(&reader, pData, nData); |
-** while( !dlrAtEnd(&reader) ){ |
-** // calls to dlrDocid() and kin. |
-** dlrStep(&reader); |
-** } |
-** dlrDestroy(&reader); |
-*/ |
-typedef struct DLReader { |
- DocListType iType; |
- const char *pData; |
- int nData; |
- |
- sqlite_int64 iDocid; |
- int nElement; |
-} DLReader; |
- |
-static int dlrAtEnd(DLReader *pReader){ |
- assert( pReader->nData>=0 ); |
- return pReader->nData<=0; |
-} |
-static sqlite_int64 dlrDocid(DLReader *pReader){ |
- assert( !dlrAtEnd(pReader) ); |
- return pReader->iDocid; |
-} |
-static const char *dlrDocData(DLReader *pReader){ |
- assert( !dlrAtEnd(pReader) ); |
- return pReader->pData; |
-} |
-static int dlrDocDataBytes(DLReader *pReader){ |
- assert( !dlrAtEnd(pReader) ); |
- return pReader->nElement; |
-} |
-static int dlrAllDataBytes(DLReader *pReader){ |
- assert( !dlrAtEnd(pReader) ); |
- return pReader->nData; |
-} |
-/* TODO(shess) Consider adding a field to track iDocid varint length |
-** to make these two functions faster. This might matter (a tiny bit) |
-** for queries. |
+** If *pRc is initially non-zero then this routine is a no-op. |
*/ |
-static const char *dlrPosData(DLReader *pReader){ |
- sqlite_int64 iDummy; |
- int n = fts3GetVarintSafe(pReader->pData, &iDummy, pReader->nElement); |
- if( !n ) return NULL; |
- assert( !dlrAtEnd(pReader) ); |
- return pReader->pData+n; |
+static void fts3DbExec( |
+ int *pRc, /* Success code */ |
+ sqlite3 *db, /* Database in which to run SQL */ |
+ const char *zFormat, /* Format string for SQL */ |
+ ... /* Arguments to the format string */ |
+){ |
+ va_list ap; |
+ char *zSql; |
+ if( *pRc ) return; |
+ va_start(ap, zFormat); |
+ zSql = sqlite3_vmprintf(zFormat, ap); |
+ va_end(ap); |
+ if( zSql==0 ){ |
+ *pRc = SQLITE_NOMEM; |
+ }else{ |
+ *pRc = sqlite3_exec(db, zSql, 0, 0, 0); |
+ sqlite3_free(zSql); |
+ } |
} |
-static int dlrPosDataLen(DLReader *pReader){ |
- sqlite_int64 iDummy; |
- int n = fts3GetVarint(pReader->pData, &iDummy); |
- assert( !dlrAtEnd(pReader) ); |
- return pReader->nElement-n; |
+ |
+/* |
+** The xDestroy() virtual table method. |
+*/ |
+static int fts3DestroyMethod(sqlite3_vtab *pVtab){ |
+ int rc = SQLITE_OK; /* Return code */ |
+ Fts3Table *p = (Fts3Table *)pVtab; |
+ sqlite3 *db = p->db; |
+ |
+ /* Drop the shadow tables */ |
+ fts3DbExec(&rc, db, "DROP TABLE IF EXISTS %Q.'%q_content'", p->zDb, p->zName); |
+ fts3DbExec(&rc, db, "DROP TABLE IF EXISTS %Q.'%q_segments'", p->zDb,p->zName); |
+ fts3DbExec(&rc, db, "DROP TABLE IF EXISTS %Q.'%q_segdir'", p->zDb, p->zName); |
+ fts3DbExec(&rc, db, "DROP TABLE IF EXISTS %Q.'%q_docsize'", p->zDb, p->zName); |
+ fts3DbExec(&rc, db, "DROP TABLE IF EXISTS %Q.'%q_stat'", p->zDb, p->zName); |
+ |
+ /* If everything has worked, invoke fts3DisconnectMethod() to free the |
+ ** memory associated with the Fts3Table structure and return SQLITE_OK. |
+ ** Otherwise, return an SQLite error code. |
+ */ |
+ return (rc==SQLITE_OK ? fts3DisconnectMethod(pVtab) : rc); |
} |
-static int dlrStep(DLReader *pReader){ |
- assert( !dlrAtEnd(pReader) ); |
- |
- /* Skip past current doclist element. */ |
- assert( pReader->nElement<=pReader->nData ); |
- pReader->pData += pReader->nElement; |
- pReader->nData -= pReader->nElement; |
- |
- /* If there is more data, read the next doclist element. */ |
- if( pReader->nData>0 ){ |
- sqlite_int64 iDocidDelta; |
- int nTotal = 0; |
- int iDummy, n = fts3GetVarintSafe(pReader->pData, &iDocidDelta, pReader->nData); |
- if( !n ) return SQLITE_CORRUPT_BKPT; |
- nTotal += n; |
- pReader->iDocid += iDocidDelta; |
- if( pReader->iType>=DL_POSITIONS ){ |
- while( 1 ){ |
- n = fts3GetVarint32Safe(pReader->pData+nTotal, &iDummy, pReader->nData-nTotal); |
- if( !n ) return SQLITE_CORRUPT_BKPT; |
- nTotal += n; |
- if( iDummy==POS_END ) break; |
- if( iDummy==POS_COLUMN ){ |
- n = fts3GetVarint32Safe(pReader->pData+nTotal, &iDummy, pReader->nData-nTotal); |
- if( !n ) return SQLITE_CORRUPT_BKPT; |
- nTotal += n; |
- }else if( pReader->iType==DL_POSITIONS_OFFSETS ){ |
- n = fts3GetVarint32Safe(pReader->pData+nTotal, &iDummy, pReader->nData-nTotal); |
- if( !n ) return SQLITE_CORRUPT_BKPT; |
- nTotal += n; |
- n = fts3GetVarint32Safe(pReader->pData+nTotal, &iDummy, pReader->nData-nTotal); |
- if( !n ) return SQLITE_CORRUPT_BKPT; |
- nTotal += n; |
- } |
- } |
+ |
+ |
+/* |
+** Invoke sqlite3_declare_vtab() to declare the schema for the FTS3 table |
+** passed as the first argument. This is done as part of the xConnect() |
+** and xCreate() methods. |
+** |
+** If *pRc is non-zero when this function is called, it is a no-op. |
+** Otherwise, if an error occurs, an SQLite error code is stored in *pRc |
+** before returning. |
+*/ |
+static void fts3DeclareVtab(int *pRc, Fts3Table *p){ |
+ if( *pRc==SQLITE_OK ){ |
+ int i; /* Iterator variable */ |
+ int rc; /* Return code */ |
+ char *zSql; /* SQL statement passed to declare_vtab() */ |
+ char *zCols; /* List of user defined columns */ |
+ |
+ /* Create a list of user columns for the virtual table */ |
+ zCols = sqlite3_mprintf("%Q, ", p->azColumn[0]); |
+ for(i=1; zCols && i<p->nColumn; i++){ |
+ zCols = sqlite3_mprintf("%z%Q, ", zCols, p->azColumn[i]); |
+ } |
+ |
+ /* Create the whole "CREATE TABLE" statement to pass to SQLite */ |
+ zSql = sqlite3_mprintf( |
+ "CREATE TABLE x(%s %Q HIDDEN, docid HIDDEN)", zCols, p->zName |
+ ); |
+ if( !zCols || !zSql ){ |
+ rc = SQLITE_NOMEM; |
+ }else{ |
+ rc = sqlite3_declare_vtab(p->db, zSql); |
} |
- pReader->nElement = nTotal; |
- assert( pReader->nElement<=pReader->nData ); |
+ |
+ sqlite3_free(zSql); |
+ sqlite3_free(zCols); |
+ *pRc = rc; |
} |
- return SQLITE_OK; |
-} |
-static void dlrDestroy(DLReader *pReader){ |
- SCRAMBLE(pReader); |
} |
-static int dlrInit(DLReader *pReader, DocListType iType, |
- const char *pData, int nData){ |
- int rc; |
- assert( pData!=NULL && nData!=0 ); |
- pReader->iType = iType; |
- pReader->pData = pData; |
- pReader->nData = nData; |
- pReader->nElement = 0; |
- pReader->iDocid = 0; |
- |
- /* Load the first element's data. There must be a first element. */ |
- rc = dlrStep(pReader); |
- if( rc!=SQLITE_OK ) dlrDestroy(pReader); |
+ |
+/* |
+** Create the backing store tables (%_content, %_segments and %_segdir) |
+** required by the FTS3 table passed as the only argument. This is done |
+** as part of the vtab xCreate() method. |
+** |
+** If the p->bHasDocsize boolean is true (indicating that this is an |
+** FTS4 table, not an FTS3 table) then also create the %_docsize and |
+** %_stat tables required by FTS4. |
+*/ |
+static int fts3CreateTables(Fts3Table *p){ |
+ int rc = SQLITE_OK; /* Return code */ |
+ int i; /* Iterator variable */ |
+ char *zContentCols; /* Columns of %_content table */ |
+ sqlite3 *db = p->db; /* The database connection */ |
+ |
+ /* Create a list of user columns for the content table */ |
+ zContentCols = sqlite3_mprintf("docid INTEGER PRIMARY KEY"); |
+ for(i=0; zContentCols && i<p->nColumn; i++){ |
+ char *z = p->azColumn[i]; |
+ zContentCols = sqlite3_mprintf("%z, 'c%d%q'", zContentCols, i, z); |
+ } |
+ if( zContentCols==0 ) rc = SQLITE_NOMEM; |
+ |
+ /* Create the content table */ |
+ fts3DbExec(&rc, db, |
+ "CREATE TABLE %Q.'%q_content'(%s)", |
+ p->zDb, p->zName, zContentCols |
+ ); |
+ sqlite3_free(zContentCols); |
+ /* Create other tables */ |
+ fts3DbExec(&rc, db, |
+ "CREATE TABLE %Q.'%q_segments'(blockid INTEGER PRIMARY KEY, block BLOB);", |
+ p->zDb, p->zName |
+ ); |
+ fts3DbExec(&rc, db, |
+ "CREATE TABLE %Q.'%q_segdir'(" |
+ "level INTEGER," |
+ "idx INTEGER," |
+ "start_block INTEGER," |
+ "leaves_end_block INTEGER," |
+ "end_block INTEGER," |
+ "root BLOB," |
+ "PRIMARY KEY(level, idx)" |
+ ");", |
+ p->zDb, p->zName |
+ ); |
+ if( p->bHasDocsize ){ |
+ fts3DbExec(&rc, db, |
+ "CREATE TABLE %Q.'%q_docsize'(docid INTEGER PRIMARY KEY, size BLOB);", |
+ p->zDb, p->zName |
+ ); |
+ } |
+ if( p->bHasStat ){ |
+ fts3DbExec(&rc, db, |
+ "CREATE TABLE %Q.'%q_stat'(id INTEGER PRIMARY KEY, value BLOB);", |
+ p->zDb, p->zName |
+ ); |
+ } |
return rc; |
} |
-#ifndef NDEBUG |
-/* Verify that the doclist can be validly decoded. Also returns the |
-** last docid found because it is convenient in other assertions for |
-** DLWriter. |
+/* |
+** Store the current database page-size in bytes in p->nPgsz. |
+** |
+** If *pRc is non-zero when this function is called, it is a no-op. |
+** Otherwise, if an error occurs, an SQLite error code is stored in *pRc |
+** before returning. |
*/ |
-static void docListValidate(DocListType iType, const char *pData, int nData, |
- sqlite_int64 *pLastDocid){ |
- sqlite_int64 iPrevDocid = 0; |
- assert( nData>0 ); |
- assert( pData!=0 ); |
- assert( pData+nData>pData ); |
- while( nData!=0 ){ |
- sqlite_int64 iDocidDelta; |
- int n = fts3GetVarint(pData, &iDocidDelta); |
- iPrevDocid += iDocidDelta; |
- if( iType>DL_DOCIDS ){ |
- int iDummy; |
- while( 1 ){ |
- n += fts3GetVarint32(pData+n, &iDummy); |
- if( iDummy==POS_END ) break; |
- if( iDummy==POS_COLUMN ){ |
- n += fts3GetVarint32(pData+n, &iDummy); |
- }else if( iType>DL_POSITIONS ){ |
- n += fts3GetVarint32(pData+n, &iDummy); |
- n += fts3GetVarint32(pData+n, &iDummy); |
- } |
- assert( n<=nData ); |
+static void fts3DatabasePageSize(int *pRc, Fts3Table *p){ |
+ if( *pRc==SQLITE_OK ){ |
+ int rc; /* Return code */ |
+ char *zSql; /* SQL text "PRAGMA %Q.page_size" */ |
+ sqlite3_stmt *pStmt; /* Compiled "PRAGMA %Q.page_size" statement */ |
+ |
+ zSql = sqlite3_mprintf("PRAGMA %Q.page_size", p->zDb); |
+ if( !zSql ){ |
+ rc = SQLITE_NOMEM; |
+ }else{ |
+ rc = sqlite3_prepare(p->db, zSql, -1, &pStmt, 0); |
+ if( rc==SQLITE_OK ){ |
+ sqlite3_step(pStmt); |
+ p->nPgsz = sqlite3_column_int(pStmt, 0); |
+ rc = sqlite3_finalize(pStmt); |
} |
} |
- assert( n<=nData ); |
- pData += n; |
- nData -= n; |
+ assert( p->nPgsz>0 || rc!=SQLITE_OK ); |
+ sqlite3_free(zSql); |
+ *pRc = rc; |
} |
- if( pLastDocid ) *pLastDocid = iPrevDocid; |
} |
-#define ASSERT_VALID_DOCLIST(i, p, n, o) docListValidate(i, p, n, o) |
-#else |
-#define ASSERT_VALID_DOCLIST(i, p, n, o) assert( 1 ) |
-#endif |
-/*******************************************************************/ |
-/* DLWriter is used to write doclist data to a DataBuffer. DLWriter |
-** always appends to the buffer and does not own it. |
+/* |
+** "Special" FTS4 arguments are column specifications of the following form: |
** |
-** dlwInit - initialize to write a given type doclistto a buffer. |
-** dlwDestroy - clear the writer's memory. Does not free buffer. |
-** dlwAppend - append raw doclist data to buffer. |
-** dlwCopy - copy next doclist from reader to writer. |
-** dlwAdd - construct doclist element and append to buffer. |
-** Only apply dlwAdd() to DL_DOCIDS doclists (else use PLWriter). |
-*/ |
-typedef struct DLWriter { |
- DocListType iType; |
- DataBuffer *b; |
- sqlite_int64 iPrevDocid; |
-#ifndef NDEBUG |
- int has_iPrevDocid; |
-#endif |
-} DLWriter; |
- |
-static void dlwInit(DLWriter *pWriter, DocListType iType, DataBuffer *b){ |
- pWriter->b = b; |
- pWriter->iType = iType; |
- pWriter->iPrevDocid = 0; |
-#ifndef NDEBUG |
- pWriter->has_iPrevDocid = 0; |
-#endif |
-} |
-static void dlwDestroy(DLWriter *pWriter){ |
- SCRAMBLE(pWriter); |
-} |
-/* iFirstDocid is the first docid in the doclist in pData. It is |
-** needed because pData may point within a larger doclist, in which |
-** case the first item would be delta-encoded. |
+** <key> = <value> |
** |
-** iLastDocid is the final docid in the doclist in pData. It is |
-** needed to create the new iPrevDocid for future delta-encoding. The |
-** code could decode the passed doclist to recreate iLastDocid, but |
-** the only current user (docListMerge) already has decoded this |
-** information. |
+** There may not be whitespace surrounding the "=" character. The <value> |
+** term may be quoted, but the <key> may not. |
*/ |
-/* TODO(shess) This has become just a helper for docListMerge. |
-** Consider a refactor to make this cleaner. |
-*/ |
-static int dlwAppend(DLWriter *pWriter, |
- const char *pData, int nData, |
- sqlite_int64 iFirstDocid, sqlite_int64 iLastDocid){ |
- sqlite_int64 iDocid = 0; |
- char c[VARINT_MAX]; |
- int nFirstOld, nFirstNew; /* Old and new varint len of first docid. */ |
-#ifndef NDEBUG |
- sqlite_int64 iLastDocidDelta; |
-#endif |
- |
- /* Recode the initial docid as delta from iPrevDocid. */ |
- nFirstOld = fts3GetVarintSafe(pData, &iDocid, nData); |
- if( !nFirstOld ) return SQLITE_CORRUPT_BKPT; |
- assert( nFirstOld<nData || (nFirstOld==nData && pWriter->iType==DL_DOCIDS) ); |
- nFirstNew = fts3PutVarint(c, iFirstDocid-pWriter->iPrevDocid); |
+static int fts3IsSpecialColumn( |
+ const char *z, |
+ int *pnKey, |
+ char **pzValue |
+){ |
+ char *zValue; |
+ const char *zCsr = z; |
- /* Verify that the incoming doclist is valid AND that it ends with |
- ** the expected docid. This is essential because we'll trust this |
- ** docid in future delta-encoding. |
- */ |
- ASSERT_VALID_DOCLIST(pWriter->iType, pData, nData, &iLastDocidDelta); |
- assert( iLastDocid==iFirstDocid-iDocid+iLastDocidDelta ); |
+ while( *zCsr!='=' ){ |
+ if( *zCsr=='\0' ) return 0; |
+ zCsr++; |
+ } |
- /* Append recoded initial docid and everything else. Rest of docids |
- ** should have been delta-encoded from previous initial docid. |
- */ |
- if( nFirstOld<nData ){ |
- dataBufferAppend2(pWriter->b, c, nFirstNew, |
- pData+nFirstOld, nData-nFirstOld); |
- }else{ |
- dataBufferAppend(pWriter->b, c, nFirstNew); |
+ *pnKey = (int)(zCsr-z); |
+ zValue = sqlite3_mprintf("%s", &zCsr[1]); |
+ if( zValue ){ |
+ sqlite3Fts3Dequote(zValue); |
} |
- pWriter->iPrevDocid = iLastDocid; |
- return SQLITE_OK; |
-} |
-static int dlwCopy(DLWriter *pWriter, DLReader *pReader){ |
- return dlwAppend(pWriter, dlrDocData(pReader), dlrDocDataBytes(pReader), |
- dlrDocid(pReader), dlrDocid(pReader)); |
-} |
-static void dlwAdd(DLWriter *pWriter, sqlite_int64 iDocid){ |
- char c[VARINT_MAX]; |
- int n = fts3PutVarint(c, iDocid-pWriter->iPrevDocid); |
- |
- /* Docids must ascend. */ |
- assert( !pWriter->has_iPrevDocid || iDocid>pWriter->iPrevDocid ); |
- assert( pWriter->iType==DL_DOCIDS ); |
- |
- dataBufferAppend(pWriter->b, c, n); |
- pWriter->iPrevDocid = iDocid; |
-#ifndef NDEBUG |
- pWriter->has_iPrevDocid = 1; |
-#endif |
+ *pzValue = zValue; |
+ return 1; |
} |
-/*******************************************************************/ |
-/* PLReader is used to read data from a document's position list. As |
-** the caller steps through the list, data is cached so that varints |
-** only need to be decoded once. |
-** |
-** plrInit, plrDestroy - create/destroy a reader. |
-** plrColumn, plrPosition, plrStartOffset, plrEndOffset - accessors |
-** plrAtEnd - at end of stream, only call plrDestroy once true. |
-** plrStep - step to the next element. |
+/* |
+** Append the output of a printf() style formatting to an existing string. |
*/ |
-typedef struct PLReader { |
- /* These refer to the next position's data. nData will reach 0 when |
- ** reading the last position, so plrStep() signals EOF by setting |
- ** pData to NULL. |
- */ |
- const char *pData; |
- int nData; |
- |
- DocListType iType; |
- int iColumn; /* the last column read */ |
- int iPosition; /* the last position read */ |
- int iStartOffset; /* the last start offset read */ |
- int iEndOffset; /* the last end offset read */ |
-} PLReader; |
- |
-static int plrAtEnd(PLReader *pReader){ |
- return pReader->pData==NULL; |
-} |
-static int plrColumn(PLReader *pReader){ |
- assert( !plrAtEnd(pReader) ); |
- return pReader->iColumn; |
-} |
-static int plrPosition(PLReader *pReader){ |
- assert( !plrAtEnd(pReader) ); |
- return pReader->iPosition; |
-} |
-static int plrStartOffset(PLReader *pReader){ |
- assert( !plrAtEnd(pReader) ); |
- return pReader->iStartOffset; |
-} |
-static int plrEndOffset(PLReader *pReader){ |
- assert( !plrAtEnd(pReader) ); |
- return pReader->iEndOffset; |
-} |
-static int plrStep(PLReader *pReader){ |
- int i, n, nTotal = 0; |
- |
- assert( !plrAtEnd(pReader) ); |
- |
- if( pReader->nData<=0 ){ |
- pReader->pData = NULL; |
- return SQLITE_OK; |
- } |
- |
- n = fts3GetVarint32Safe(pReader->pData, &i, pReader->nData); |
- if( !n ) return SQLITE_CORRUPT_BKPT; |
- nTotal += n; |
- if( i==POS_COLUMN ){ |
- n = fts3GetVarint32Safe(pReader->pData+nTotal, &pReader->iColumn, pReader->nData-nTotal); |
- if( !n ) return SQLITE_CORRUPT_BKPT; |
- nTotal += n; |
- pReader->iPosition = 0; |
- pReader->iStartOffset = 0; |
- n = fts3GetVarint32Safe(pReader->pData+nTotal, &i, pReader->nData-nTotal); |
- if( !n ) return SQLITE_CORRUPT_BKPT; |
- nTotal += n; |
- } |
- /* Should never see adjacent column changes. */ |
- assert( i!=POS_COLUMN ); |
- |
- if( i==POS_END ){ |
- assert( nTotal<=pReader->nData ); |
- pReader->nData = 0; |
- pReader->pData = NULL; |
- return SQLITE_OK; |
- } |
- |
- pReader->iPosition += i-POS_BASE; |
- if( pReader->iType==DL_POSITIONS_OFFSETS ){ |
- n = fts3GetVarint32Safe(pReader->pData+nTotal, &i, pReader->nData-nTotal); |
- if( !n ) return SQLITE_CORRUPT_BKPT; |
- nTotal += n; |
- pReader->iStartOffset += i; |
- n = fts3GetVarint32Safe(pReader->pData+nTotal, &i, pReader->nData-nTotal); |
- if( !n ) return SQLITE_CORRUPT_BKPT; |
- nTotal += n; |
- pReader->iEndOffset = pReader->iStartOffset+i; |
+static void fts3Appendf( |
+ int *pRc, /* IN/OUT: Error code */ |
+ char **pz, /* IN/OUT: Pointer to string buffer */ |
+ const char *zFormat, /* Printf format string to append */ |
+ ... /* Arguments for printf format string */ |
+){ |
+ if( *pRc==SQLITE_OK ){ |
+ va_list ap; |
+ char *z; |
+ va_start(ap, zFormat); |
+ z = sqlite3_vmprintf(zFormat, ap); |
+ if( z && *pz ){ |
+ char *z2 = sqlite3_mprintf("%s%s", *pz, z); |
+ sqlite3_free(z); |
+ z = z2; |
+ } |
+ if( z==0 ) *pRc = SQLITE_NOMEM; |
+ sqlite3_free(*pz); |
+ *pz = z; |
} |
- assert( nTotal<=pReader->nData ); |
- pReader->pData += nTotal; |
- pReader->nData -= nTotal; |
- return SQLITE_OK; |
} |
-static void plrDestroy(PLReader *pReader){ |
- SCRAMBLE(pReader); |
-} |
-static int plrInit(PLReader *pReader, DLReader *pDLReader){ |
- int rc; |
- pReader->pData = dlrPosData(pDLReader); |
- pReader->nData = dlrPosDataLen(pDLReader); |
- pReader->iType = pDLReader->iType; |
- pReader->iColumn = 0; |
- pReader->iPosition = 0; |
- pReader->iStartOffset = 0; |
- pReader->iEndOffset = 0; |
- rc = plrStep(pReader); |
- if( rc!=SQLITE_OK ) plrDestroy(pReader); |
- return rc; |
+/* |
+** Return a copy of input string zInput enclosed in double-quotes (") and |
+** with all double quote characters escaped. For example: |
+** |
+** fts3QuoteId("un \"zip\"") -> "un \"\"zip\"\"" |
+** |
+** The pointer returned points to memory obtained from sqlite3_malloc(). It |
+** is the caller's responsibility to call sqlite3_free() to release this |
+** memory. |
+*/ |
+static char *fts3QuoteId(char const *zInput){ |
+ int nRet; |
+ char *zRet; |
+ nRet = 2 + strlen(zInput)*2 + 1; |
+ zRet = sqlite3_malloc(nRet); |
+ if( zRet ){ |
+ int i; |
+ char *z = zRet; |
+ *(z++) = '"'; |
+ for(i=0; zInput[i]; i++){ |
+ if( zInput[i]=='"' ) *(z++) = '"'; |
+ *(z++) = zInput[i]; |
+ } |
+ *(z++) = '"'; |
+ *(z++) = '\0'; |
+ } |
+ return zRet; |
} |
-/*******************************************************************/ |
-/* PLWriter is used in constructing a document's position list. As a |
-** convenience, if iType is DL_DOCIDS, PLWriter becomes a no-op. |
-** PLWriter writes to the associated DLWriter's buffer. |
+/* |
+** Return a list of comma separated SQL expressions that could be used |
+** in a SELECT statement such as the following: |
** |
-** plwInit - init for writing a document's poslist. |
-** plwDestroy - clear a writer. |
-** plwAdd - append position and offset information. |
-** plwCopy - copy next position's data from reader to writer. |
-** plwTerminate - add any necessary doclist terminator. |
+** SELECT <list of expressions> FROM %_content AS x ... |
** |
-** Calling plwAdd() after plwTerminate() may result in a corrupt |
-** doclist. |
-*/ |
-/* TODO(shess) Until we've written the second item, we can cache the |
-** first item's information. Then we'd have three states: |
+** to return the docid, followed by each column of text data in order |
+** from left to right. If parameter zFunc is not NULL, then instead of |
+** being returned directly each column of text data is passed to an SQL |
+** function named zFunc first. For example, if zFunc is "unzip" and the |
+** table has the three user-defined columns "a", "b", and "c", the following |
+** string is returned: |
** |
-** - initialized with docid, no positions. |
-** - docid and one position. |
-** - docid and multiple positions. |
+** "docid, unzip(x.'a'), unzip(x.'b'), unzip(x.'c')" |
** |
-** Only the last state needs to actually write to dlw->b, which would |
-** be an improvement in the DLCollector case. |
-*/ |
-typedef struct PLWriter { |
- DLWriter *dlw; |
- |
- int iColumn; /* the last column written */ |
- int iPos; /* the last position written */ |
- int iOffset; /* the last start offset written */ |
-} PLWriter; |
- |
-/* TODO(shess) In the case where the parent is reading these values |
-** from a PLReader, we could optimize to a copy if that PLReader has |
-** the same type as pWriter. |
+** The pointer returned points to a buffer allocated by sqlite3_malloc(). It |
+** is the responsibility of the caller to eventually free it. |
+** |
+** If *pRc is not SQLITE_OK when this function is called, it is a no-op (and |
+** a NULL pointer is returned). Otherwise, if an OOM error is encountered |
+** by this function, NULL is returned and *pRc is set to SQLITE_NOMEM. If |
+** no error occurs, *pRc is left unmodified. |
*/ |
-static void plwAdd(PLWriter *pWriter, int iColumn, int iPos, |
- int iStartOffset, int iEndOffset){ |
- /* Worst-case space for POS_COLUMN, iColumn, iPosDelta, |
- ** iStartOffsetDelta, and iEndOffsetDelta. |
- */ |
- char c[5*VARINT_MAX]; |
- int n = 0; |
- |
- /* Ban plwAdd() after plwTerminate(). */ |
- assert( pWriter->iPos!=-1 ); |
- |
- if( pWriter->dlw->iType==DL_DOCIDS ) return; |
+static char *fts3ReadExprList(Fts3Table *p, const char *zFunc, int *pRc){ |
+ char *zRet = 0; |
+ char *zFree = 0; |
+ char *zFunction; |
+ int i; |
- if( iColumn!=pWriter->iColumn ){ |
- n += fts3PutVarint(c+n, POS_COLUMN); |
- n += fts3PutVarint(c+n, iColumn); |
- pWriter->iColumn = iColumn; |
- pWriter->iPos = 0; |
- pWriter->iOffset = 0; |
- } |
- assert( iPos>=pWriter->iPos ); |
- n += fts3PutVarint(c+n, POS_BASE+(iPos-pWriter->iPos)); |
- pWriter->iPos = iPos; |
- if( pWriter->dlw->iType==DL_POSITIONS_OFFSETS ){ |
- assert( iStartOffset>=pWriter->iOffset ); |
- n += fts3PutVarint(c+n, iStartOffset-pWriter->iOffset); |
- pWriter->iOffset = iStartOffset; |
- assert( iEndOffset>=iStartOffset ); |
- n += fts3PutVarint(c+n, iEndOffset-iStartOffset); |
+ if( !zFunc ){ |
+ zFunction = ""; |
+ }else{ |
+ zFree = zFunction = fts3QuoteId(zFunc); |
} |
- dataBufferAppend(pWriter->dlw->b, c, n); |
-} |
-static void plwCopy(PLWriter *pWriter, PLReader *pReader){ |
- plwAdd(pWriter, plrColumn(pReader), plrPosition(pReader), |
- plrStartOffset(pReader), plrEndOffset(pReader)); |
-} |
-static void plwInit(PLWriter *pWriter, DLWriter *dlw, sqlite_int64 iDocid){ |
- char c[VARINT_MAX]; |
- int n; |
- |
- pWriter->dlw = dlw; |
- |
- /* Docids must ascend. */ |
- assert( !pWriter->dlw->has_iPrevDocid || iDocid>pWriter->dlw->iPrevDocid ); |
- n = fts3PutVarint(c, iDocid-pWriter->dlw->iPrevDocid); |
- dataBufferAppend(pWriter->dlw->b, c, n); |
- pWriter->dlw->iPrevDocid = iDocid; |
-#ifndef NDEBUG |
- pWriter->dlw->has_iPrevDocid = 1; |
-#endif |
- |
- pWriter->iColumn = 0; |
- pWriter->iPos = 0; |
- pWriter->iOffset = 0; |
-} |
-/* TODO(shess) Should plwDestroy() also terminate the doclist? But |
-** then plwDestroy() would no longer be just a destructor, it would |
-** also be doing work, which isn't consistent with the overall idiom. |
-** Another option would be for plwAdd() to always append any necessary |
-** terminator, so that the output is always correct. But that would |
-** add incremental work to the common case with the only benefit being |
-** API elegance. Punt for now. |
-*/ |
-static void plwTerminate(PLWriter *pWriter){ |
- if( pWriter->dlw->iType>DL_DOCIDS ){ |
- char c[VARINT_MAX]; |
- int n = fts3PutVarint(c, POS_END); |
- dataBufferAppend(pWriter->dlw->b, c, n); |
+ fts3Appendf(pRc, &zRet, "docid"); |
+ for(i=0; i<p->nColumn; i++){ |
+ fts3Appendf(pRc, &zRet, ",%s(x.'c%d%q')", zFunction, i, p->azColumn[i]); |
} |
-#ifndef NDEBUG |
- /* Mark as terminated for assert in plwAdd(). */ |
- pWriter->iPos = -1; |
-#endif |
-} |
-static void plwDestroy(PLWriter *pWriter){ |
- SCRAMBLE(pWriter); |
+ sqlite3_free(zFree); |
+ return zRet; |
} |
-/*******************************************************************/ |
-/* DLCollector wraps PLWriter and DLWriter to provide a |
-** dynamically-allocated doclist area to use during tokenization. |
+/* |
+** Return a list of N comma separated question marks, where N is the number |
+** of columns in the %_content table (one for the docid plus one for each |
+** user-defined text column). |
** |
-** dlcNew - malloc up and initialize a collector. |
-** dlcDelete - destroy a collector and all contained items. |
-** dlcAddPos - append position and offset information. |
-** dlcAddDoclist - add the collected doclist to the given buffer. |
-** dlcNext - terminate the current document and open another. |
-*/ |
-typedef struct DLCollector { |
- DataBuffer b; |
- DLWriter dlw; |
- PLWriter plw; |
-} DLCollector; |
- |
-/* TODO(shess) This could also be done by calling plwTerminate() and |
-** dataBufferAppend(). I tried that, expecting nominal performance |
-** differences, but it seemed to pretty reliably be worth 1% to code |
-** it this way. I suspect it is the incremental malloc overhead (some |
-** percentage of the plwTerminate() calls will cause a realloc), so |
-** this might be worth revisiting if the DataBuffer implementation |
-** changes. |
+** If argument zFunc is not NULL, then all but the first question mark |
+** is preceded by zFunc and an open bracket, and followed by a closed |
+** bracket. For example, if zFunc is "zip" and the FTS3 table has three |
+** user-defined text columns, the following string is returned: |
+** |
+** "?, zip(?), zip(?), zip(?)" |
+** |
+** The pointer returned points to a buffer allocated by sqlite3_malloc(). It |
+** is the responsibility of the caller to eventually free it. |
+** |
+** If *pRc is not SQLITE_OK when this function is called, it is a no-op (and |
+** a NULL pointer is returned). Otherwise, if an OOM error is encountered |
+** by this function, NULL is returned and *pRc is set to SQLITE_NOMEM. If |
+** no error occurs, *pRc is left unmodified. |
*/ |
-static void dlcAddDoclist(DLCollector *pCollector, DataBuffer *b){ |
- if( pCollector->dlw.iType>DL_DOCIDS ){ |
- char c[VARINT_MAX]; |
- int n = fts3PutVarint(c, POS_END); |
- dataBufferAppend2(b, pCollector->b.pData, pCollector->b.nData, c, n); |
+static char *fts3WriteExprList(Fts3Table *p, const char *zFunc, int *pRc){ |
+ char *zRet = 0; |
+ char *zFree = 0; |
+ char *zFunction; |
+ int i; |
+ |
+ if( !zFunc ){ |
+ zFunction = ""; |
}else{ |
- dataBufferAppend(b, pCollector->b.pData, pCollector->b.nData); |
+ zFree = zFunction = fts3QuoteId(zFunc); |
} |
+ fts3Appendf(pRc, &zRet, "?"); |
+ for(i=0; i<p->nColumn; i++){ |
+ fts3Appendf(pRc, &zRet, ",%s(?)", zFunction); |
+ } |
+ sqlite3_free(zFree); |
+ return zRet; |
} |
-static void dlcNext(DLCollector *pCollector, sqlite_int64 iDocid){ |
- plwTerminate(&pCollector->plw); |
- plwDestroy(&pCollector->plw); |
- plwInit(&pCollector->plw, &pCollector->dlw, iDocid); |
-} |
-static void dlcAddPos(DLCollector *pCollector, int iColumn, int iPos, |
- int iStartOffset, int iEndOffset){ |
- plwAdd(&pCollector->plw, iColumn, iPos, iStartOffset, iEndOffset); |
-} |
- |
-static DLCollector *dlcNew(sqlite_int64 iDocid, DocListType iType){ |
- DLCollector *pCollector = sqlite3_malloc(sizeof(DLCollector)); |
- dataBufferInit(&pCollector->b, 0); |
- dlwInit(&pCollector->dlw, iType, &pCollector->b); |
- plwInit(&pCollector->plw, &pCollector->dlw, iDocid); |
- return pCollector; |
-} |
-static void dlcDelete(DLCollector *pCollector){ |
- plwDestroy(&pCollector->plw); |
- dlwDestroy(&pCollector->dlw); |
- dataBufferDestroy(&pCollector->b); |
- SCRAMBLE(pCollector); |
- sqlite3_free(pCollector); |
-} |
- |
- |
-/* Copy the doclist data of iType in pData/nData into *out, trimming |
-** unnecessary data as we go. Only columns matching iColumn are |
-** copied, all columns copied if iColumn is -1. Elements with no |
-** matching columns are dropped. The output is an iOutType doclist. |
-*/ |
-/* NOTE(shess) This code is only valid after all doclists are merged. |
-** If this is run before merges, then doclist items which represent |
-** deletion will be trimmed, and will thus not effect a deletion |
-** during the merge. |
-*/ |
-static int docListTrim(DocListType iType, const char *pData, int nData, |
- int iColumn, DocListType iOutType, DataBuffer *out){ |
- DLReader dlReader; |
- DLWriter dlWriter; |
- int rc; |
- assert( iOutType<=iType ); |
+/* |
+** This function is the implementation of both the xConnect and xCreate |
+** methods of the FTS3 virtual table. |
+** |
+** The argv[] array contains the following: |
+** |
+** argv[0] -> module name ("fts3" or "fts4") |
+** argv[1] -> database name |
+** argv[2] -> table name |
+** argv[...] -> "column name" and other module argument fields. |
+*/ |
+static int fts3InitVtab( |
+ int isCreate, /* True for xCreate, false for xConnect */ |
+ sqlite3 *db, /* The SQLite database connection */ |
+ void *pAux, /* Hash table containing tokenizers */ |
+ int argc, /* Number of elements in argv array */ |
+ const char * const *argv, /* xCreate/xConnect argument array */ |
+ sqlite3_vtab **ppVTab, /* Write the resulting vtab structure here */ |
+ char **pzErr /* Write any error message here */ |
+){ |
+ Fts3Hash *pHash = (Fts3Hash *)pAux; |
+ Fts3Table *p = 0; /* Pointer to allocated vtab */ |
+ int rc = SQLITE_OK; /* Return code */ |
+ int i; /* Iterator variable */ |
+ int nByte; /* Size of allocation used for *p */ |
+ int iCol; /* Column index */ |
+ int nString = 0; /* Bytes required to hold all column names */ |
+ int nCol = 0; /* Number of columns in the FTS table */ |
+ char *zCsr; /* Space for holding column names */ |
+ int nDb; /* Bytes required to hold database name */ |
+ int nName; /* Bytes required to hold table name */ |
+ int isFts4 = (argv[0][3]=='4'); /* True for FTS4, false for FTS3 */ |
+ int bNoDocsize = 0; /* True to omit %_docsize table */ |
+ const char **aCol; /* Array of column names */ |
+ sqlite3_tokenizer *pTokenizer = 0; /* Tokenizer for this table */ |
+ |
+ char *zCompress = 0; |
+ char *zUncompress = 0; |
+ |
+ assert( strlen(argv[0])==4 ); |
+ assert( (sqlite3_strnicmp(argv[0], "fts4", 4)==0 && isFts4) |
+ || (sqlite3_strnicmp(argv[0], "fts3", 4)==0 && !isFts4) |
+ ); |
- rc = dlrInit(&dlReader, iType, pData, nData); |
- if( rc!=SQLITE_OK ) return rc; |
- dlwInit(&dlWriter, iOutType, out); |
+ nDb = (int)strlen(argv[1]) + 1; |
+ nName = (int)strlen(argv[2]) + 1; |
- while( !dlrAtEnd(&dlReader) ){ |
- PLReader plReader; |
- PLWriter plWriter; |
- int match = 0; |
+ aCol = (const char **)sqlite3_malloc(sizeof(const char *) * (argc-2) ); |
+ if( !aCol ) return SQLITE_NOMEM; |
+ memset((void *)aCol, 0, sizeof(const char *) * (argc-2)); |
- rc = plrInit(&plReader, &dlReader); |
- if( rc!=SQLITE_OK ) break; |
+ /* Loop through all of the arguments passed by the user to the FTS3/4 |
+ ** module (i.e. all the column names and special arguments). This loop |
+ ** does the following: |
+ ** |
+ ** + Figures out the number of columns the FTSX table will have, and |
+ ** the number of bytes of space that must be allocated to store copies |
+ ** of the column names. |
+ ** |
+ ** + If there is a tokenizer specification included in the arguments, |
+ ** initializes the tokenizer pTokenizer. |
+ */ |
+ for(i=3; rc==SQLITE_OK && i<argc; i++){ |
+ char const *z = argv[i]; |
+ int nKey; |
+ char *zVal; |
+ |
+ /* Check if this is a tokenizer specification */ |
+ if( !pTokenizer |
+ && strlen(z)>8 |
+ && 0==sqlite3_strnicmp(z, "tokenize", 8) |
+ && 0==sqlite3Fts3IsIdChar(z[8]) |
+ ){ |
+ rc = sqlite3Fts3InitTokenizer(pHash, &z[9], &pTokenizer, pzErr); |
+ } |
- while( !plrAtEnd(&plReader) ){ |
- if( iColumn==-1 || plrColumn(&plReader)==iColumn ){ |
- if( !match ){ |
- plwInit(&plWriter, &dlWriter, dlrDocid(&dlReader)); |
- match = 1; |
- } |
- plwAdd(&plWriter, plrColumn(&plReader), plrPosition(&plReader), |
- plrStartOffset(&plReader), plrEndOffset(&plReader)); |
+ /* Check if it is an FTS4 special argument. */ |
+ else if( isFts4 && fts3IsSpecialColumn(z, &nKey, &zVal) ){ |
+ if( !zVal ){ |
+ rc = SQLITE_NOMEM; |
+ goto fts3_init_out; |
} |
- rc = plrStep(&plReader); |
- if( rc!=SQLITE_OK ){ |
- plrDestroy(&plReader); |
- goto err; |
+ if( nKey==9 && 0==sqlite3_strnicmp(z, "matchinfo", 9) ){ |
+ if( strlen(zVal)==4 && 0==sqlite3_strnicmp(zVal, "fts3", 4) ){ |
+ bNoDocsize = 1; |
+ }else{ |
+ *pzErr = sqlite3_mprintf("unrecognized matchinfo: %s", zVal); |
+ rc = SQLITE_ERROR; |
+ } |
+ }else if( nKey==8 && 0==sqlite3_strnicmp(z, "compress", 8) ){ |
+ zCompress = zVal; |
+ zVal = 0; |
+ }else if( nKey==10 && 0==sqlite3_strnicmp(z, "uncompress", 10) ){ |
+ zUncompress = zVal; |
+ zVal = 0; |
+ }else{ |
+ *pzErr = sqlite3_mprintf("unrecognized parameter: %s", z); |
+ rc = SQLITE_ERROR; |
} |
+ sqlite3_free(zVal); |
} |
- if( match ){ |
- plwTerminate(&plWriter); |
- plwDestroy(&plWriter); |
+ |
+ /* Otherwise, the argument is a column name. */ |
+ else { |
+ nString += (int)(strlen(z) + 1); |
+ aCol[nCol++] = z; |
} |
+ } |
+ if( rc!=SQLITE_OK ) goto fts3_init_out; |
- plrDestroy(&plReader); |
- rc = dlrStep(&dlReader); |
- if( rc!=SQLITE_OK ) break; |
+ if( nCol==0 ){ |
+ assert( nString==0 ); |
+ aCol[0] = "content"; |
+ nString = 8; |
+ nCol = 1; |
+ } |
+ |
+ if( pTokenizer==0 ){ |
+ rc = sqlite3Fts3InitTokenizer(pHash, "simple", &pTokenizer, pzErr); |
+ if( rc!=SQLITE_OK ) goto fts3_init_out; |
+ } |
+ assert( pTokenizer ); |
+ |
+ |
+ /* Allocate and populate the Fts3Table structure. */ |
+ nByte = sizeof(Fts3Table) + /* Fts3Table */ |
+ nCol * sizeof(char *) + /* azColumn */ |
+ nName + /* zName */ |
+ nDb + /* zDb */ |
+ nString; /* Space for azColumn strings */ |
+ p = (Fts3Table*)sqlite3_malloc(nByte); |
+ if( p==0 ){ |
+ rc = SQLITE_NOMEM; |
+ goto fts3_init_out; |
+ } |
+ memset(p, 0, nByte); |
+ p->db = db; |
+ p->nColumn = nCol; |
+ p->nPendingData = 0; |
+ p->azColumn = (char **)&p[1]; |
+ p->pTokenizer = pTokenizer; |
+ p->nNodeSize = 1000; |
+ p->nMaxPendingData = FTS3_MAX_PENDING_DATA; |
+ p->bHasDocsize = (isFts4 && bNoDocsize==0); |
+ p->bHasStat = isFts4; |
+ fts3HashInit(&p->pendingTerms, FTS3_HASH_STRING, 1); |
+ |
+ /* Fill in the zName and zDb fields of the vtab structure. */ |
+ zCsr = (char *)&p->azColumn[nCol]; |
+ p->zName = zCsr; |
+ memcpy(zCsr, argv[2], nName); |
+ zCsr += nName; |
+ p->zDb = zCsr; |
+ memcpy(zCsr, argv[1], nDb); |
+ zCsr += nDb; |
+ |
+ /* Fill in the azColumn array */ |
+ for(iCol=0; iCol<nCol; iCol++){ |
+ char *z; |
+ int n; |
+ z = (char *)sqlite3Fts3NextToken(aCol[iCol], &n); |
+ memcpy(zCsr, z, n); |
+ zCsr[n] = '\0'; |
+ sqlite3Fts3Dequote(zCsr); |
+ p->azColumn[iCol] = zCsr; |
+ zCsr += n+1; |
+ assert( zCsr <= &((char *)p)[nByte] ); |
+ } |
+ |
+ if( (zCompress==0)!=(zUncompress==0) ){ |
+ char const *zMiss = (zCompress==0 ? "compress" : "uncompress"); |
+ rc = SQLITE_ERROR; |
+ *pzErr = sqlite3_mprintf("missing %s parameter in fts4 constructor", zMiss); |
+ } |
+ p->zReadExprlist = fts3ReadExprList(p, zUncompress, &rc); |
+ p->zWriteExprlist = fts3WriteExprList(p, zCompress, &rc); |
+ if( rc!=SQLITE_OK ) goto fts3_init_out; |
+ |
+ /* If this is an xCreate call, create the underlying tables in the |
+ ** database. TODO: For xConnect(), it could verify that said tables exist. |
+ */ |
+ if( isCreate ){ |
+ rc = fts3CreateTables(p); |
+ } |
+ |
+ /* Figure out the page-size for the database. This is required in order to |
+ ** estimate the cost of loading large doclists from the database (see |
+ ** function sqlite3Fts3SegReaderCost() for details). |
+ */ |
+ fts3DatabasePageSize(&rc, p); |
+ |
+ /* Declare the table schema to SQLite. */ |
+ fts3DeclareVtab(&rc, p); |
+ |
+fts3_init_out: |
+ sqlite3_free(zCompress); |
+ sqlite3_free(zUncompress); |
+ sqlite3_free((void *)aCol); |
+ if( rc!=SQLITE_OK ){ |
+ if( p ){ |
+ fts3DisconnectMethod((sqlite3_vtab *)p); |
+ }else if( pTokenizer ){ |
+ pTokenizer->pModule->xDestroy(pTokenizer); |
+ } |
+ }else{ |
+ *ppVTab = &p->base; |
} |
-err: |
- dlwDestroy(&dlWriter); |
- dlrDestroy(&dlReader); |
return rc; |
} |
-/* Used by docListMerge() to keep doclists in the ascending order by |
-** docid, then ascending order by age (so the newest comes first). |
+/* |
+** The xConnect() and xCreate() methods for the virtual table. All the |
+** work is done in function fts3InitVtab(). |
+*/ |
+static int fts3ConnectMethod( |
+ sqlite3 *db, /* Database connection */ |
+ void *pAux, /* Pointer to tokenizer hash table */ |
+ int argc, /* Number of elements in argv array */ |
+ const char * const *argv, /* xCreate/xConnect argument array */ |
+ sqlite3_vtab **ppVtab, /* OUT: New sqlite3_vtab object */ |
+ char **pzErr /* OUT: sqlite3_malloc'd error message */ |
+){ |
+ return fts3InitVtab(0, db, pAux, argc, argv, ppVtab, pzErr); |
+} |
+static int fts3CreateMethod( |
+ sqlite3 *db, /* Database connection */ |
+ void *pAux, /* Pointer to tokenizer hash table */ |
+ int argc, /* Number of elements in argv array */ |
+ const char * const *argv, /* xCreate/xConnect argument array */ |
+ sqlite3_vtab **ppVtab, /* OUT: New sqlite3_vtab object */ |
+ char **pzErr /* OUT: sqlite3_malloc'd error message */ |
+){ |
+ return fts3InitVtab(1, db, pAux, argc, argv, ppVtab, pzErr); |
+} |
+ |
+/* |
+** Implementation of the xBestIndex method for FTS3 tables. There |
+** are three possible strategies, in order of preference: |
+** |
+** 1. Direct lookup by rowid or docid. |
+** 2. Full-text search using a MATCH operator on a non-docid column. |
+** 3. Linear scan of %_content table. |
*/ |
-typedef struct OrderedDLReader { |
- DLReader *pReader; |
+static int fts3BestIndexMethod(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ |
+ Fts3Table *p = (Fts3Table *)pVTab; |
+ int i; /* Iterator variable */ |
+ int iCons = -1; /* Index of constraint to use */ |
- /* TODO(shess) If we assume that docListMerge pReaders is ordered by |
- ** age (which we do), then we could use pReader comparisons to break |
- ** ties. |
+ /* By default use a full table scan. This is an expensive option, |
+ ** so search through the constraints to see if a more efficient |
+ ** strategy is possible. |
*/ |
- int idx; |
-} OrderedDLReader; |
- |
-/* Order eof to end, then by docid asc, idx desc. */ |
-static int orderedDLReaderCmp(OrderedDLReader *r1, OrderedDLReader *r2){ |
- if( dlrAtEnd(r1->pReader) ){ |
- if( dlrAtEnd(r2->pReader) ) return 0; /* Both atEnd(). */ |
- return 1; /* Only r1 atEnd(). */ |
+ pInfo->idxNum = FTS3_FULLSCAN_SEARCH; |
+ pInfo->estimatedCost = 500000; |
+ for(i=0; i<pInfo->nConstraint; i++){ |
+ struct sqlite3_index_constraint *pCons = &pInfo->aConstraint[i]; |
+ if( pCons->usable==0 ) continue; |
+ |
+ /* A direct lookup on the rowid or docid column. Assign a cost of 1.0. */ |
+ if( pCons->op==SQLITE_INDEX_CONSTRAINT_EQ |
+ && (pCons->iColumn<0 || pCons->iColumn==p->nColumn+1 ) |
+ ){ |
+ pInfo->idxNum = FTS3_DOCID_SEARCH; |
+ pInfo->estimatedCost = 1.0; |
+ iCons = i; |
+ } |
+ |
+ /* A MATCH constraint. Use a full-text search. |
+ ** |
+ ** If there is more than one MATCH constraint available, use the first |
+ ** one encountered. If there is both a MATCH constraint and a direct |
+ ** rowid/docid lookup, prefer the MATCH strategy. This is done even |
+ ** though the rowid/docid lookup is faster than a MATCH query, selecting |
+ ** it would lead to an "unable to use function MATCH in the requested |
+ ** context" error. |
+ */ |
+ if( pCons->op==SQLITE_INDEX_CONSTRAINT_MATCH |
+ && pCons->iColumn>=0 && pCons->iColumn<=p->nColumn |
+ ){ |
+ pInfo->idxNum = FTS3_FULLTEXT_SEARCH + pCons->iColumn; |
+ pInfo->estimatedCost = 2.0; |
+ iCons = i; |
+ break; |
+ } |
} |
- if( dlrAtEnd(r2->pReader) ) return -1; /* Only r2 atEnd(). */ |
- |
- if( dlrDocid(r1->pReader)<dlrDocid(r2->pReader) ) return -1; |
- if( dlrDocid(r1->pReader)>dlrDocid(r2->pReader) ) return 1; |
- /* Descending on idx. */ |
- return r2->idx-r1->idx; |
+ if( iCons>=0 ){ |
+ pInfo->aConstraintUsage[iCons].argvIndex = 1; |
+ pInfo->aConstraintUsage[iCons].omit = 1; |
+ } |
+ return SQLITE_OK; |
} |
-/* Bubble p[0] to appropriate place in p[1..n-1]. Assumes that |
-** p[1..n-1] is already sorted. |
-*/ |
-/* TODO(shess) Is this frequent enough to warrant a binary search? |
-** Before implementing that, instrument the code to check. In most |
-** current usage, I expect that p[0] will be less than p[1] a very |
-** high proportion of the time. |
+/* |
+** Implementation of xOpen method. |
*/ |
-static void orderedDLReaderReorder(OrderedDLReader *p, int n){ |
- while( n>1 && orderedDLReaderCmp(p, p+1)>0 ){ |
- OrderedDLReader tmp = p[0]; |
- p[0] = p[1]; |
- p[1] = tmp; |
- n--; |
- p++; |
+static int fts3OpenMethod(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCsr){ |
+ sqlite3_vtab_cursor *pCsr; /* Allocated cursor */ |
+ |
+ UNUSED_PARAMETER(pVTab); |
+ |
+ /* Allocate a buffer large enough for an Fts3Cursor structure. If the |
+ ** allocation succeeds, zero it and return SQLITE_OK. Otherwise, |
+ ** if the allocation fails, return SQLITE_NOMEM. |
+ */ |
+ *ppCsr = pCsr = (sqlite3_vtab_cursor *)sqlite3_malloc(sizeof(Fts3Cursor)); |
+ if( !pCsr ){ |
+ return SQLITE_NOMEM; |
} |
+ memset(pCsr, 0, sizeof(Fts3Cursor)); |
+ return SQLITE_OK; |
} |
-/* Given an array of doclist readers, merge their doclist elements |
-** into out in sorted order (by docid), dropping elements from older |
-** readers when there is a duplicate docid. pReaders is assumed to be |
-** ordered by age, oldest first. |
-*/ |
-/* TODO(shess) nReaders must be <= MERGE_COUNT. This should probably |
-** be fixed. |
+/* |
+** Close the cursor. For additional information see the documentation |
+** on the xClose method of the virtual table interface. |
*/ |
-static int docListMerge(DataBuffer *out, |
- DLReader *pReaders, int nReaders){ |
- OrderedDLReader readers[MERGE_COUNT]; |
- DLWriter writer; |
- int i, n; |
- const char *pStart = 0; |
- int nStart = 0; |
- sqlite_int64 iFirstDocid = 0, iLastDocid = 0; |
- int rc = SQLITE_OK; |
+static int fts3CloseMethod(sqlite3_vtab_cursor *pCursor){ |
+ Fts3Cursor *pCsr = (Fts3Cursor *)pCursor; |
+ assert( ((Fts3Table *)pCsr->base.pVtab)->pSegments==0 ); |
+ sqlite3_finalize(pCsr->pStmt); |
+ sqlite3Fts3ExprFree(pCsr->pExpr); |
+ sqlite3Fts3FreeDeferredTokens(pCsr); |
+ sqlite3_free(pCsr->aDoclist); |
+ sqlite3_free(pCsr->aMatchinfo); |
+ sqlite3_free(pCsr); |
+ return SQLITE_OK; |
+} |
- assert( nReaders>0 ); |
- if( nReaders==1 ){ |
- dataBufferAppend(out, dlrDocData(pReaders), dlrAllDataBytes(pReaders)); |
+/* |
+** Position the pCsr->pStmt statement so that it is on the row |
+** of the %_content table that contains the last match. Return |
+** SQLITE_OK on success. |
+*/ |
+static int fts3CursorSeek(sqlite3_context *pContext, Fts3Cursor *pCsr){ |
+ if( pCsr->isRequireSeek ){ |
+ pCsr->isRequireSeek = 0; |
+ sqlite3_bind_int64(pCsr->pStmt, 1, pCsr->iPrevId); |
+ if( SQLITE_ROW==sqlite3_step(pCsr->pStmt) ){ |
+ return SQLITE_OK; |
+ }else{ |
+ int rc = sqlite3_reset(pCsr->pStmt); |
+ if( rc==SQLITE_OK ){ |
+ /* If no row was found and no error has occurred, then the %_content |
+ ** table is missing a row that is present in the full-text index. |
+ ** The data structures are corrupt. |
+ */ |
+ rc = SQLITE_CORRUPT; |
+ } |
+ pCsr->isEof = 1; |
+ if( pContext ){ |
+ sqlite3_result_error_code(pContext, rc); |
+ } |
+ return rc; |
+ } |
+ }else{ |
return SQLITE_OK; |
} |
+} |
- assert( nReaders<=MERGE_COUNT ); |
- n = 0; |
- for(i=0; i<nReaders; i++){ |
- assert( pReaders[i].iType==pReaders[0].iType ); |
- readers[i].pReader = pReaders+i; |
- readers[i].idx = i; |
- n += dlrAllDataBytes(&pReaders[i]); |
- } |
- /* Conservatively size output to sum of inputs. Output should end |
- ** up strictly smaller than input. |
+/* |
+** This function is used to process a single interior node when searching |
+** a b-tree for a term or term prefix. The node data is passed to this |
+** function via the zNode/nNode parameters. The term to search for is |
+** passed in zTerm/nTerm. |
+** |
+** If piFirst is not NULL, then this function sets *piFirst to the blockid |
+** of the child node that heads the sub-tree that may contain the term. |
+** |
+** If piLast is not NULL, then *piLast is set to the right-most child node |
+** that heads a sub-tree that may contain a term for which zTerm/nTerm is |
+** a prefix. |
+** |
+** If an OOM error occurs, SQLITE_NOMEM is returned. Otherwise, SQLITE_OK. |
+*/ |
+static int fts3ScanInteriorNode( |
+ const char *zTerm, /* Term to select leaves for */ |
+ int nTerm, /* Size of term zTerm in bytes */ |
+ const char *zNode, /* Buffer containing segment interior node */ |
+ int nNode, /* Size of buffer at zNode */ |
+ sqlite3_int64 *piFirst, /* OUT: Selected child node */ |
+ sqlite3_int64 *piLast /* OUT: Selected child node */ |
+){ |
+ int rc = SQLITE_OK; /* Return code */ |
+ const char *zCsr = zNode; /* Cursor to iterate through node */ |
+ const char *zEnd = &zCsr[nNode];/* End of interior node buffer */ |
+ char *zBuffer = 0; /* Buffer to load terms into */ |
+ int nAlloc = 0; /* Size of allocated buffer */ |
+ int isFirstTerm = 1; /* True when processing first term on page */ |
+ sqlite3_int64 iChild; /* Block id of child node to descend to */ |
+ |
+ /* Skip over the 'height' varint that occurs at the start of every |
+ ** interior node. Then load the blockid of the left-child of the b-tree |
+ ** node into variable iChild. |
+ ** |
+ ** Even if the data structure on disk is corrupted, this (reading two |
+ ** varints from the buffer) does not risk an overread. If zNode is a |
+ ** root node, then the buffer comes from a SELECT statement. SQLite does |
+ ** not make this guarantee explicitly, but in practice there are always |
+ ** either more than 20 bytes of allocated space following the nNode bytes of |
+ ** contents, or two zero bytes. Or, if the node is read from the %_segments |
+ ** table, then there are always 20 bytes of zeroed padding following the |
+ ** nNode bytes of content (see sqlite3Fts3ReadBlock() for details). |
*/ |
- dataBufferExpand(out, n); |
- |
- /* Get the readers into sorted order. */ |
- while( i-->0 ){ |
- orderedDLReaderReorder(readers+i, nReaders-i); |
+ zCsr += sqlite3Fts3GetVarint(zCsr, &iChild); |
+ zCsr += sqlite3Fts3GetVarint(zCsr, &iChild); |
+ if( zCsr>zEnd ){ |
+ return SQLITE_CORRUPT; |
} |
- |
- dlwInit(&writer, pReaders[0].iType, out); |
- while( !dlrAtEnd(readers[0].pReader) ){ |
- sqlite_int64 iDocid = dlrDocid(readers[0].pReader); |
- |
- /* If this is a continuation of the current buffer to copy, extend |
- ** that buffer. memcpy() seems to be more efficient if it has a |
- ** lots of data to copy. |
+ |
+ while( zCsr<zEnd && (piFirst || piLast) ){ |
+ int cmp; /* memcmp() result */ |
+ int nSuffix; /* Size of term suffix */ |
+ int nPrefix = 0; /* Size of term prefix */ |
+ int nBuffer; /* Total term size */ |
+ |
+ /* Load the next term on the node into zBuffer. Use realloc() to expand |
+ ** the size of zBuffer if required. */ |
+ if( !isFirstTerm ){ |
+ zCsr += sqlite3Fts3GetVarint32(zCsr, &nPrefix); |
+ } |
+ isFirstTerm = 0; |
+ zCsr += sqlite3Fts3GetVarint32(zCsr, &nSuffix); |
+ |
+ /* NOTE(shess): Previous code checked for negative nPrefix and |
+ ** nSuffix and suffix overrunning zEnd. Additionally corrupt if |
+ ** the prefix is longer than the previous term, or if the suffix |
+ ** causes overflow. |
*/ |
- if( dlrDocData(readers[0].pReader)==pStart+nStart ){ |
- nStart += dlrDocDataBytes(readers[0].pReader); |
- }else{ |
- if( pStart!=0 ){ |
- rc = dlwAppend(&writer, pStart, nStart, iFirstDocid, iLastDocid); |
- if( rc!=SQLITE_OK ) goto err; |
+ if( nPrefix<0 || nSuffix<0 || nPrefix>nBuffer |
+ || &zCsr[nSuffix]<zCsr || &zCsr[nSuffix]>zEnd ){ |
+ rc = SQLITE_CORRUPT; |
+ goto finish_scan; |
+ } |
+ if( nPrefix+nSuffix>nAlloc ){ |
+ char *zNew; |
+ nAlloc = (nPrefix+nSuffix) * 2; |
+ zNew = (char *)sqlite3_realloc(zBuffer, nAlloc); |
+ if( !zNew ){ |
+ rc = SQLITE_NOMEM; |
+ goto finish_scan; |
} |
- pStart = dlrDocData(readers[0].pReader); |
- nStart = dlrDocDataBytes(readers[0].pReader); |
- iFirstDocid = iDocid; |
- } |
- iLastDocid = iDocid; |
- rc = dlrStep(readers[0].pReader); |
- if( rc!= SQLITE_OK ) goto err; |
- |
- /* Drop all of the older elements with the same docid. */ |
- for(i=1; i<nReaders && |
- !dlrAtEnd(readers[i].pReader) && |
- dlrDocid(readers[i].pReader)==iDocid; i++){ |
- rc = dlrStep(readers[i].pReader); |
- if( rc!=SQLITE_OK ) goto err; |
+ zBuffer = zNew; |
+ } |
+ memcpy(&zBuffer[nPrefix], zCsr, nSuffix); |
+ nBuffer = nPrefix + nSuffix; |
+ zCsr += nSuffix; |
+ |
+ /* Compare the term we are searching for with the term just loaded from |
+ ** the interior node. If the specified term is greater than or equal |
+ ** to the term from the interior node, then all terms on the sub-tree |
+ ** headed by node iChild are smaller than zTerm. No need to search |
+ ** iChild. |
+ ** |
+ ** If the interior node term is larger than the specified term, then |
+ ** the tree headed by iChild may contain the specified term. |
+ */ |
+ cmp = memcmp(zTerm, zBuffer, (nBuffer>nTerm ? nTerm : nBuffer)); |
+ if( piFirst && (cmp<0 || (cmp==0 && nBuffer>nTerm)) ){ |
+ *piFirst = iChild; |
+ piFirst = 0; |
} |
- /* Get the readers back into order. */ |
- while( i-->0 ){ |
- orderedDLReaderReorder(readers+i, nReaders-i); |
+ if( piLast && cmp<0 ){ |
+ *piLast = iChild; |
+ piLast = 0; |
} |
- } |
- /* Copy over any remaining elements. */ |
- if( nStart>0 ) rc = dlwAppend(&writer, pStart, nStart, iFirstDocid, iLastDocid); |
-err: |
- dlwDestroy(&writer); |
+ iChild++; |
+ }; |
+ |
+ if( piFirst ) *piFirst = iChild; |
+ if( piLast ) *piLast = iChild; |
+ |
+ finish_scan: |
+ sqlite3_free(zBuffer); |
return rc; |
} |
-/* Helper function for posListUnion(). Compares the current position |
-** between left and right, returning as standard C idiom of <0 if |
-** left<right, >0 if left>right, and 0 if left==right. "End" always |
-** compares greater. |
-*/ |
-static int posListCmp(PLReader *pLeft, PLReader *pRight){ |
- assert( pLeft->iType==pRight->iType ); |
- if( pLeft->iType==DL_DOCIDS ) return 0; |
- if( plrAtEnd(pLeft) ) return plrAtEnd(pRight) ? 0 : 1; |
- if( plrAtEnd(pRight) ) return -1; |
+/* |
+** The buffer pointed to by argument zNode (size nNode bytes) contains an |
+** interior node of a b-tree segment. The zTerm buffer (size nTerm bytes) |
+** contains a term. This function searches the sub-tree headed by the zNode |
+** node for the range of leaf nodes that may contain the specified term |
+** or terms for which the specified term is a prefix. |
+** |
+** If piLeaf is not NULL, then *piLeaf is set to the blockid of the |
+** left-most leaf node in the tree that may contain the specified term. |
+** If piLeaf2 is not NULL, then *piLeaf2 is set to the blockid of the |
+** right-most leaf node that may contain a term for which the specified |
+** term is a prefix. |
+** |
+** It is possible that the range of returned leaf nodes does not contain |
+** the specified term or any terms for which it is a prefix. However, if the |
+** segment does contain any such terms, they are stored within the identified |
+** range. Because this function only inspects interior segment nodes (and |
+** never loads leaf nodes into memory), it is not possible to be sure. |
+** |
+** If an error occurs, an error code other than SQLITE_OK is returned. |
+*/ |
+static int fts3SelectLeaf( |
+ Fts3Table *p, /* Virtual table handle */ |
+ const char *zTerm, /* Term to select leaves for */ |
+ int nTerm, /* Size of term zTerm in bytes */ |
+ const char *zNode, /* Buffer containing segment interior node */ |
+ int nNode, /* Size of buffer at zNode */ |
+ sqlite3_int64 *piLeaf, /* Selected leaf node */ |
+ sqlite3_int64 *piLeaf2 /* Selected leaf node */ |
+){ |
+ int rc; /* Return code */ |
+ int iHeight; /* Height of this node in tree */ |
- if( plrColumn(pLeft)<plrColumn(pRight) ) return -1; |
- if( plrColumn(pLeft)>plrColumn(pRight) ) return 1; |
+ assert( piLeaf || piLeaf2 ); |
- if( plrPosition(pLeft)<plrPosition(pRight) ) return -1; |
- if( plrPosition(pLeft)>plrPosition(pRight) ) return 1; |
- if( pLeft->iType==DL_POSITIONS ) return 0; |
+ sqlite3Fts3GetVarint32(zNode, &iHeight); |
+ rc = fts3ScanInteriorNode(zTerm, nTerm, zNode, nNode, piLeaf, piLeaf2); |
+ assert( !piLeaf2 || !piLeaf || rc!=SQLITE_OK || (*piLeaf<=*piLeaf2) ); |
- if( plrStartOffset(pLeft)<plrStartOffset(pRight) ) return -1; |
- if( plrStartOffset(pLeft)>plrStartOffset(pRight) ) return 1; |
+ if( rc==SQLITE_OK && iHeight>1 ){ |
+ char *zBlob = 0; /* Blob read from %_segments table */ |
+ int nBlob; /* Size of zBlob in bytes */ |
+ |
+ if( piLeaf && piLeaf2 && (*piLeaf!=*piLeaf2) ){ |
+ rc = sqlite3Fts3ReadBlock(p, *piLeaf, &zBlob, &nBlob); |
+ if( rc==SQLITE_OK ){ |
+ rc = fts3SelectLeaf(p, zTerm, nTerm, zBlob, nBlob, piLeaf, 0); |
+ } |
+ sqlite3_free(zBlob); |
+ piLeaf = 0; |
+ zBlob = 0; |
+ } |
- if( plrEndOffset(pLeft)<plrEndOffset(pRight) ) return -1; |
- if( plrEndOffset(pLeft)>plrEndOffset(pRight) ) return 1; |
+ if( rc==SQLITE_OK ){ |
+ rc = sqlite3Fts3ReadBlock(p, piLeaf ? *piLeaf : *piLeaf2, &zBlob, &nBlob); |
+ } |
+ if( rc==SQLITE_OK ){ |
+ rc = fts3SelectLeaf(p, zTerm, nTerm, zBlob, nBlob, piLeaf, piLeaf2); |
+ } |
+ sqlite3_free(zBlob); |
+ } |
- return 0; |
+ return rc; |
} |
-/* Write the union of position lists in pLeft and pRight to pOut. |
-** "Union" in this case meaning "All unique position tuples". Should |
-** work with any doclist type, though both inputs and the output |
-** should be the same type. |
+/* |
+** This function is used to create delta-encoded serialized lists of FTS3 |
+** varints. Each call to this function appends a single varint to a list. |
*/ |
-static int posListUnion(DLReader *pLeft, DLReader *pRight, DLWriter *pOut){ |
- PLReader left, right; |
- PLWriter writer; |
- int rc; |
- |
- assert( dlrDocid(pLeft)==dlrDocid(pRight) ); |
- assert( pLeft->iType==pRight->iType ); |
- assert( pLeft->iType==pOut->iType ); |
+static void fts3PutDeltaVarint( |
+ char **pp, /* IN/OUT: Output pointer */ |
+ sqlite3_int64 *piPrev, /* IN/OUT: Previous value written to list */ |
+ sqlite3_int64 iVal /* Write this value to the list */ |
+){ |
+ assert( iVal-*piPrev > 0 || (*piPrev==0 && iVal==0) ); |
+ *pp += sqlite3Fts3PutVarint(*pp, iVal-*piPrev); |
+ *piPrev = iVal; |
+} |
- rc = plrInit(&left, pLeft); |
- if( rc!=SQLITE_OK ) return rc; |
- rc = plrInit(&right, pRight); |
- if( rc!=SQLITE_OK ){ |
- plrDestroy(&left); |
- return rc; |
- } |
- plwInit(&writer, pOut, dlrDocid(pLeft)); |
- |
- while( !plrAtEnd(&left) || !plrAtEnd(&right) ){ |
- int c = posListCmp(&left, &right); |
- if( c<0 ){ |
- plwCopy(&writer, &left); |
- rc = plrStep(&left); |
- if( rc!=SQLITE_OK ) break; |
- }else if( c>0 ){ |
- plwCopy(&writer, &right); |
- rc = plrStep(&right); |
- if( rc!=SQLITE_OK ) break; |
- }else{ |
- plwCopy(&writer, &left); |
- rc = plrStep(&left); |
- if( rc!=SQLITE_OK ) break; |
- rc = plrStep(&right); |
- if( rc!=SQLITE_OK ) break; |
- } |
+/* |
+** When this function is called, *ppPoslist is assumed to point to the |
+** start of a position-list. After it returns, *ppPoslist points to the |
+** first byte after the position-list. |
+** |
+** A position list is a list of positions (delta encoded) and columns for |
+** a single document record of a doclist. So, in other words, this |
+** routine advances *ppPoslist so that it points to the next docid in |
+** the doclist, or to the first byte past the end of the doclist. |
+** |
+** If pp is not NULL, then the contents of the position list are copied |
+** to *pp. *pp is set to point to the first byte past the last byte copied |
+** before this function returns. |
+*/ |
+static void fts3PoslistCopy(char **pp, char **ppPoslist){ |
+ char *pEnd = *ppPoslist; |
+ char c = 0; |
+ |
+ /* The end of a position list is marked by a zero encoded as an FTS3 |
+ ** varint. A single POS_END (0) byte. Except, if the 0 byte is preceded by |
+ ** a byte with the 0x80 bit set, then it is not a varint 0, but the tail |
+ ** of some other, multi-byte, value. |
+ ** |
+ ** The following while-loop moves pEnd to point to the first byte that is not |
+ ** immediately preceded by a byte with the 0x80 bit set. Then increments |
+ ** pEnd once more so that it points to the byte immediately following the |
+ ** last byte in the position-list. |
+ */ |
+ while( *pEnd | c ){ |
+ c = *pEnd++ & 0x80; |
+ testcase( c!=0 && (*pEnd)==0 ); |
} |
+ pEnd++; /* Advance past the POS_END terminator byte */ |
- plwTerminate(&writer); |
- plwDestroy(&writer); |
- plrDestroy(&left); |
- plrDestroy(&right); |
- return rc; |
+ if( pp ){ |
+ int n = (int)(pEnd - *ppPoslist); |
+ char *p = *pp; |
+ memcpy(p, *ppPoslist, n); |
+ p += n; |
+ *pp = p; |
+ } |
+ *ppPoslist = pEnd; |
} |
-/* Write the union of doclists in pLeft and pRight to pOut. For |
-** docids in common between the inputs, the union of the position |
-** lists is written. Inputs and outputs are always type DL_DEFAULT. |
+/* |
+** When this function is called, *ppPoslist is assumed to point to the |
+** start of a column-list. After it returns, *ppPoslist points to |
+** the terminator (POS_COLUMN or POS_END) byte of the column-list. |
+** |
+** A column-list is a list of delta-encoded positions for a single column |
+** within a single document within a doclist. |
+** |
+** The column-list is terminated either by a POS_COLUMN varint (1) or |
+** a POS_END varint (0). This routine leaves *ppPoslist pointing to |
+** the POS_COLUMN or POS_END that terminates the column-list. |
+** |
+** If pp is not NULL, then the contents of the column-list are copied |
+** to *pp. *pp is set to point to the first byte past the last byte copied |
+** before this function returns. The POS_COLUMN or POS_END terminator |
+** is not copied into *pp. |
*/ |
-static int docListUnion( |
- const char *pLeft, int nLeft, |
- const char *pRight, int nRight, |
- DataBuffer *pOut /* Write the combined doclist here */ |
-){ |
- DLReader left, right; |
- DLWriter writer; |
- int rc; |
+static void fts3ColumnlistCopy(char **pp, char **ppPoslist){ |
+ char *pEnd = *ppPoslist; |
+ char c = 0; |
- if( nLeft==0 ){ |
- if( nRight!=0) dataBufferAppend(pOut, pRight, nRight); |
- return SQLITE_OK; |
+ /* A column-list is terminated by either a 0x01 or 0x00 byte that is |
+ ** not part of a multi-byte varint. |
+ */ |
+ while( 0xFE & (*pEnd | c) ){ |
+ c = *pEnd++ & 0x80; |
+ testcase( c!=0 && ((*pEnd)&0xfe)==0 ); |
} |
- if( nRight==0 ){ |
- dataBufferAppend(pOut, pLeft, nLeft); |
- return SQLITE_OK; |
+ if( pp ){ |
+ int n = (int)(pEnd - *ppPoslist); |
+ char *p = *pp; |
+ memcpy(p, *ppPoslist, n); |
+ p += n; |
+ *pp = p; |
} |
+ *ppPoslist = pEnd; |
+} |
- rc = dlrInit(&left, DL_DEFAULT, pLeft, nLeft); |
- if( rc!=SQLITE_OK ) return rc; |
- rc = dlrInit(&right, DL_DEFAULT, pRight, nRight); |
- if( rc!=SQLITE_OK){ |
- dlrDestroy(&left); |
- return rc; |
+/* |
+** Value used to signify the end of a position-list. This is safe because |
+** it is not possible to have a document with 2^31 terms. |
+*/ |
+#define POSITION_LIST_END 0x7fffffff |
+ |
+/* |
+** This function is used to help parse position-lists. When this function is |
+** called, *pp may point to the start of the next varint in the position-list |
+** being parsed, or it may point to 1 byte past the end of the position-list |
+** (in which case **pp will be a terminator byte: POS_END (0) or |
+** POS_COLUMN (1)). |
+** |
+** If *pp points past the end of the current position-list, set *pi to |
+** POSITION_LIST_END and return. Otherwise, read the next varint from *pp, |
+** increment the current value of *pi by the value read, and set *pp to |
+** point to the next value before returning. |
+** |
+** Before calling this routine *pi must be initialized to the value of |
+** the previous position, or zero if we are reading the first position |
+** in the position-list. Because positions are delta-encoded, the value |
+** of the previous position is needed in order to compute the value of |
+** the next position. |
+*/ |
+static void fts3ReadNextPos( |
+ char **pp, /* IN/OUT: Pointer into position-list buffer */ |
+ sqlite3_int64 *pi /* IN/OUT: Value read from position-list */ |
+){ |
+ if( (**pp)&0xFE ){ |
+ fts3GetDeltaVarint(pp, pi); |
+ *pi -= 2; |
+ }else{ |
+ *pi = POSITION_LIST_END; |
+ } |
+} |
+ |
+/* |
+** If parameter iCol is not 0, write a POS_COLUMN (1) byte followed by |
+** the value of iCol encoded as a varint to *pp. This will start a new |
+** column list. |
+** |
+** Set *pp to point to the byte just after the last byte written before |
+** returning (do not modify it if iCol==0). Return the total number of bytes |
+** written (0 if iCol==0). |
+*/ |
+static int fts3PutColNumber(char **pp, int iCol){ |
+ int n = 0; /* Number of bytes written */ |
+ if( iCol ){ |
+ char *p = *pp; /* Output pointer */ |
+ n = 1 + sqlite3Fts3PutVarint(&p[1], iCol); |
+ *p = 0x01; |
+ *pp = &p[n]; |
} |
- dlwInit(&writer, DL_DEFAULT, pOut); |
- |
- while( !dlrAtEnd(&left) || !dlrAtEnd(&right) ){ |
- if( dlrAtEnd(&right) ){ |
- rc = dlwCopy(&writer, &left); |
- if( rc!=SQLITE_OK) break; |
- rc = dlrStep(&left); |
- if( rc!=SQLITE_OK) break; |
- }else if( dlrAtEnd(&left) ){ |
- rc = dlwCopy(&writer, &right); |
- if( rc!=SQLITE_OK ) break; |
- rc = dlrStep(&right); |
- if( rc!=SQLITE_OK ) break; |
- }else if( dlrDocid(&left)<dlrDocid(&right) ){ |
- rc = dlwCopy(&writer, &left); |
- if( rc!=SQLITE_OK ) break; |
- rc = dlrStep(&left); |
- if( rc!=SQLITE_OK ) break; |
- }else if( dlrDocid(&left)>dlrDocid(&right) ){ |
- rc = dlwCopy(&writer, &right); |
- if( rc!=SQLITE_OK ) break; |
- rc = dlrStep(&right); |
- if( rc!=SQLITE_OK ) break; |
+ return n; |
+} |
+ |
+/* |
+** Compute the union of two position lists. The output written |
+** into *pp contains all positions of both *pp1 and *pp2 in sorted |
+** order and with any duplicates removed. All pointers are |
+** updated appropriately. The caller is responsible for ensuring |
+** that there is enough space in *pp to hold the complete output. |
+*/ |
+static void fts3PoslistMerge( |
+ char **pp, /* Output buffer */ |
+ char **pp1, /* Left input list */ |
+ char **pp2 /* Right input list */ |
+){ |
+ char *p = *pp; |
+ char *p1 = *pp1; |
+ char *p2 = *pp2; |
+ |
+ while( *p1 || *p2 ){ |
+ int iCol1; /* The current column index in pp1 */ |
+ int iCol2; /* The current column index in pp2 */ |
+ |
+ if( *p1==POS_COLUMN ) sqlite3Fts3GetVarint32(&p1[1], &iCol1); |
+ else if( *p1==POS_END ) iCol1 = POSITION_LIST_END; |
+ else iCol1 = 0; |
+ |
+ if( *p2==POS_COLUMN ) sqlite3Fts3GetVarint32(&p2[1], &iCol2); |
+ else if( *p2==POS_END ) iCol2 = POSITION_LIST_END; |
+ else iCol2 = 0; |
+ |
+ if( iCol1==iCol2 ){ |
+ sqlite3_int64 i1 = 0; /* Last position from pp1 */ |
+ sqlite3_int64 i2 = 0; /* Last position from pp2 */ |
+ sqlite3_int64 iPrev = 0; |
+ int n = fts3PutColNumber(&p, iCol1); |
+ p1 += n; |
+ p2 += n; |
+ |
+ /* At this point, both p1 and p2 point to the start of column-lists |
+ ** for the same column (the column with index iCol1 and iCol2). |
+ ** A column-list is a list of non-negative delta-encoded varints, each |
+ ** incremented by 2 before being stored. Each list is terminated by a |
+ ** POS_END (0) or POS_COLUMN (1). The following block merges the two lists |
+ ** and writes the results to buffer p. p is left pointing to the byte |
+ ** after the list written. No terminator (POS_END or POS_COLUMN) is |
+ ** written to the output. |
+ */ |
+ fts3GetDeltaVarint(&p1, &i1); |
+ fts3GetDeltaVarint(&p2, &i2); |
+ do { |
+ fts3PutDeltaVarint(&p, &iPrev, (i1<i2) ? i1 : i2); |
+ iPrev -= 2; |
+ if( i1==i2 ){ |
+ fts3ReadNextPos(&p1, &i1); |
+ fts3ReadNextPos(&p2, &i2); |
+ }else if( i1<i2 ){ |
+ fts3ReadNextPos(&p1, &i1); |
+ }else{ |
+ fts3ReadNextPos(&p2, &i2); |
+ } |
+ }while( i1!=POSITION_LIST_END || i2!=POSITION_LIST_END ); |
+ }else if( iCol1<iCol2 ){ |
+ p1 += fts3PutColNumber(&p, iCol1); |
+ fts3ColumnlistCopy(&p, &p1); |
}else{ |
- rc = posListUnion(&left, &right, &writer); |
- if( rc!=SQLITE_OK ) break; |
- rc = dlrStep(&left); |
- if( rc!=SQLITE_OK ) break; |
- rc = dlrStep(&right); |
- if( rc!=SQLITE_OK ) break; |
+ p2 += fts3PutColNumber(&p, iCol2); |
+ fts3ColumnlistCopy(&p, &p2); |
} |
} |
- dlrDestroy(&left); |
- dlrDestroy(&right); |
- dlwDestroy(&writer); |
- return rc; |
+ *p++ = POS_END; |
+ *pp = p; |
+ *pp1 = p1 + 1; |
+ *pp2 = p2 + 1; |
} |
-/* |
-** This function is used as part of the implementation of phrase and |
-** NEAR matching. |
+/* |
+** nToken==1 searches for adjacent positions. |
+** |
+** This function is used to merge two position lists into one. When it is |
+** called, *pp1 and *pp2 must both point to position lists. A position-list is |
+** the part of a doclist that follows each document id. For example, if a row |
+** contains: |
** |
-** pLeft and pRight are DLReaders positioned to the same docid in |
-** lists of type DL_POSITION. This function writes an entry to the |
-** DLWriter pOut for each position in pRight that is less than |
-** (nNear+1) greater (but not equal to or smaller) than a position |
-** in pLeft. For example, if nNear is 0, and the positions contained |
-** by pLeft and pRight are: |
+** 'a b c'|'x y z'|'a b b a' |
** |
-** pLeft: 5 10 15 20 |
-** pRight: 6 9 17 21 |
+** Then the position list for this row for token 'b' would consist of: |
** |
-** then the docid is added to pOut. If pOut is of type DL_POSITIONS, |
-** then a positionids "6" and "21" are also added to pOut. |
+** 0x02 0x01 0x02 0x03 0x03 0x00 |
** |
-** If boolean argument isSaveLeft is true, then positionids are copied |
-** from pLeft instead of pRight. In the example above, the positions "5" |
-** and "20" would be added instead of "6" and "21". |
+** When this function returns, both *pp1 and *pp2 are left pointing to the |
+** byte following the 0x00 terminator of their respective position lists. |
+** |
+** If isSaveLeft is 0, an entry is added to the output position list for |
+** each position in *pp2 for which there exists one or more positions in |
+** *pp1 so that (pos(*pp2)>pos(*pp1) && pos(*pp2)-pos(*pp1)<=nToken). i.e. |
+** when the *pp1 token appears before the *pp2 token, but not more than nToken |
+** slots before it. |
*/ |
-static int posListPhraseMerge( |
- DLReader *pLeft, |
- DLReader *pRight, |
- int nNear, |
- int isSaveLeft, |
- DLWriter *pOut |
+static int fts3PoslistPhraseMerge( |
+ char **pp, /* IN/OUT: Preallocated output buffer */ |
+ int nToken, /* Maximum difference in token positions */ |
+ int isSaveLeft, /* Save the left position */ |
+ int isExact, /* If *pp1 is exactly nTokens before *pp2 */ |
+ char **pp1, /* IN/OUT: Left input list */ |
+ char **pp2 /* IN/OUT: Right input list */ |
){ |
- PLReader left, right; |
- PLWriter writer; |
- int match = 0; |
- int rc; |
- |
- assert( dlrDocid(pLeft)==dlrDocid(pRight) ); |
- assert( pOut->iType!=DL_POSITIONS_OFFSETS ); |
+ char *p = (pp ? *pp : 0); |
+ char *p1 = *pp1; |
+ char *p2 = *pp2; |
+ int iCol1 = 0; |
+ int iCol2 = 0; |
+ |
+ /* Never set both isSaveLeft and isExact for the same invocation. */ |
+ assert( isSaveLeft==0 || isExact==0 ); |
+ |
+ assert( *p1!=0 && *p2!=0 ); |
+ if( *p1==POS_COLUMN ){ |
+ p1++; |
+ p1 += sqlite3Fts3GetVarint32(p1, &iCol1); |
+ } |
+ if( *p2==POS_COLUMN ){ |
+ p2++; |
+ p2 += sqlite3Fts3GetVarint32(p2, &iCol2); |
+ } |
+ |
+ while( 1 ){ |
+ if( iCol1==iCol2 ){ |
+ char *pSave = p; |
+ sqlite3_int64 iPrev = 0; |
+ sqlite3_int64 iPos1 = 0; |
+ sqlite3_int64 iPos2 = 0; |
+ |
+ if( pp && iCol1 ){ |
+ *p++ = POS_COLUMN; |
+ p += sqlite3Fts3PutVarint(p, iCol1); |
+ } |
- rc = plrInit(&left, pLeft); |
- if( rc!=SQLITE_OK ) return rc; |
- rc = plrInit(&right, pRight); |
- if( rc!=SQLITE_OK ){ |
- plrDestroy(&left); |
- return rc; |
- } |
+ assert( *p1!=POS_END && *p1!=POS_COLUMN ); |
+ assert( *p2!=POS_END && *p2!=POS_COLUMN ); |
+ fts3GetDeltaVarint(&p1, &iPos1); iPos1 -= 2; |
+ fts3GetDeltaVarint(&p2, &iPos2); iPos2 -= 2; |
- while( !plrAtEnd(&left) && !plrAtEnd(&right) ){ |
- if( plrColumn(&left)<plrColumn(&right) ){ |
- rc = plrStep(&left); |
- if( rc!=SQLITE_OK ) break; |
- }else if( plrColumn(&left)>plrColumn(&right) ){ |
- rc = plrStep(&right); |
- if( rc!=SQLITE_OK ) break; |
- }else if( plrPosition(&left)>=plrPosition(&right) ){ |
- rc = plrStep(&right); |
- if( rc!=SQLITE_OK ) break; |
- }else{ |
- if( (plrPosition(&right)-plrPosition(&left))<=(nNear+1) ){ |
- if( !match ){ |
- plwInit(&writer, pOut, dlrDocid(pLeft)); |
- match = 1; |
+ while( 1 ){ |
+ if( iPos2==iPos1+nToken |
+ || (isExact==0 && iPos2>iPos1 && iPos2<=iPos1+nToken) |
+ ){ |
+ sqlite3_int64 iSave; |
+ if( !pp ){ |
+ fts3PoslistCopy(0, &p2); |
+ fts3PoslistCopy(0, &p1); |
+ *pp1 = p1; |
+ *pp2 = p2; |
+ return 1; |
+ } |
+ iSave = isSaveLeft ? iPos1 : iPos2; |
+ fts3PutDeltaVarint(&p, &iPrev, iSave+2); iPrev -= 2; |
+ pSave = 0; |
} |
- if( !isSaveLeft ){ |
- plwAdd(&writer, plrColumn(&right), plrPosition(&right), 0, 0); |
+ if( (!isSaveLeft && iPos2<=(iPos1+nToken)) || iPos2<=iPos1 ){ |
+ if( (*p2&0xFE)==0 ) break; |
+ fts3GetDeltaVarint(&p2, &iPos2); iPos2 -= 2; |
}else{ |
- plwAdd(&writer, plrColumn(&left), plrPosition(&left), 0, 0); |
+ if( (*p1&0xFE)==0 ) break; |
+ fts3GetDeltaVarint(&p1, &iPos1); iPos1 -= 2; |
} |
- rc = plrStep(&right); |
- if( rc!=SQLITE_OK ) break; |
- }else{ |
- rc = plrStep(&left); |
- if( rc!=SQLITE_OK ) break; |
} |
+ |
+ if( pSave ){ |
+ assert( pp && p ); |
+ p = pSave; |
+ } |
+ |
+ fts3ColumnlistCopy(0, &p1); |
+ fts3ColumnlistCopy(0, &p2); |
+ assert( (*p1&0xFE)==0 && (*p2&0xFE)==0 ); |
+ if( 0==*p1 || 0==*p2 ) break; |
+ |
+ p1++; |
+ p1 += sqlite3Fts3GetVarint32(p1, &iCol1); |
+ p2++; |
+ p2 += sqlite3Fts3GetVarint32(p2, &iCol2); |
} |
- } |
- if( match ){ |
- plwTerminate(&writer); |
- plwDestroy(&writer); |
+ /* Advance pointer p1 or p2 (whichever corresponds to the smaller of |
+ ** iCol1 and iCol2) so that it points to either the 0x00 that marks the |
+ ** end of the position list, or the 0x01 that precedes the next |
+ ** column-number in the position list. |
+ */ |
+ else if( iCol1<iCol2 ){ |
+ fts3ColumnlistCopy(0, &p1); |
+ if( 0==*p1 ) break; |
+ p1++; |
+ p1 += sqlite3Fts3GetVarint32(p1, &iCol1); |
+ }else{ |
+ fts3ColumnlistCopy(0, &p2); |
+ if( 0==*p2 ) break; |
+ p2++; |
+ p2 += sqlite3Fts3GetVarint32(p2, &iCol2); |
+ } |
} |
- plrDestroy(&left); |
- plrDestroy(&right); |
- return rc; |
+ fts3PoslistCopy(0, &p2); |
+ fts3PoslistCopy(0, &p1); |
+ *pp1 = p1; |
+ *pp2 = p2; |
+ if( !pp || *pp==p ){ |
+ return 0; |
+ } |
+ *p++ = 0x00; |
+ *pp = p; |
+ return 1; |
} |
/* |
-** Compare the values pointed to by the PLReaders passed as arguments. |
-** Return -1 if the value pointed to by pLeft is considered less than |
-** the value pointed to by pRight, +1 if it is considered greater |
-** than it, or 0 if it is equal. i.e. |
-** |
-** (*pLeft - *pRight) |
-** |
-** A PLReader that is in the EOF condition is considered greater than |
-** any other. If neither argument is in EOF state, the return value of |
-** plrColumn() is used. If the plrColumn() values are equal, the |
-** comparison is on the basis of plrPosition(). |
-*/ |
-static int plrCompare(PLReader *pLeft, PLReader *pRight){ |
- assert(!plrAtEnd(pLeft) || !plrAtEnd(pRight)); |
+** Merge two position-lists as required by the NEAR operator. |
+*/ |
+static int fts3PoslistNearMerge( |
+ char **pp, /* Output buffer */ |
+ char *aTmp, /* Temporary buffer space */ |
+ int nRight, /* Maximum difference in token positions */ |
+ int nLeft, /* Maximum difference in token positions */ |
+ char **pp1, /* IN/OUT: Left input list */ |
+ char **pp2 /* IN/OUT: Right input list */ |
+){ |
+ char *p1 = *pp1; |
+ char *p2 = *pp2; |
+ |
+ if( !pp ){ |
+ if( fts3PoslistPhraseMerge(0, nRight, 0, 0, pp1, pp2) ) return 1; |
+ *pp1 = p1; |
+ *pp2 = p2; |
+ return fts3PoslistPhraseMerge(0, nLeft, 0, 0, pp2, pp1); |
+ }else{ |
+ char *pTmp1 = aTmp; |
+ char *pTmp2; |
+ char *aTmp2; |
+ int res = 1; |
+ |
+ fts3PoslistPhraseMerge(&pTmp1, nRight, 0, 0, pp1, pp2); |
+ aTmp2 = pTmp2 = pTmp1; |
+ *pp1 = p1; |
+ *pp2 = p2; |
+ fts3PoslistPhraseMerge(&pTmp2, nLeft, 1, 0, pp2, pp1); |
+ if( pTmp1!=aTmp && pTmp2!=aTmp2 ){ |
+ fts3PoslistMerge(pp, &aTmp, &aTmp2); |
+ }else if( pTmp1!=aTmp ){ |
+ fts3PoslistCopy(pp, &aTmp); |
+ }else if( pTmp2!=aTmp2 ){ |
+ fts3PoslistCopy(pp, &aTmp2); |
+ }else{ |
+ res = 0; |
+ } |
- if( plrAtEnd(pRight) || plrAtEnd(pLeft) ){ |
- return (plrAtEnd(pRight) ? -1 : 1); |
- } |
- if( plrColumn(pLeft)!=plrColumn(pRight) ){ |
- return ((plrColumn(pLeft)<plrColumn(pRight)) ? -1 : 1); |
+ return res; |
} |
- if( plrPosition(pLeft)!=plrPosition(pRight) ){ |
- return ((plrPosition(pLeft)<plrPosition(pRight)) ? -1 : 1); |
- } |
- return 0; |
} |
-/* We have two doclists with positions: pLeft and pRight. Depending |
-** on the value of the nNear parameter, perform either a phrase |
-** intersection (if nNear==0) or a NEAR intersection (if nNear>0) |
-** and write the results into pOut. |
-** |
-** A phrase intersection means that two documents only match |
-** if pLeft.iPos+1==pRight.iPos. |
-** |
-** A NEAR intersection means that two documents only match if |
-** (abs(pLeft.iPos-pRight.iPos)<nNear). |
-** |
-** If a NEAR intersection is requested, then the nPhrase argument should |
-** be passed the number of tokens in the two operands to the NEAR operator |
-** combined. For example: |
-** |
-** Query syntax nPhrase |
-** ------------------------------------ |
-** "A B C" NEAR "D E" 5 |
-** A NEAR B 2 |
-** |
-** iType controls the type of data written to pOut. If iType is |
-** DL_POSITIONS, the positions are those from pRight. |
+/* |
+** Values that may be used as the first parameter to fts3DoclistMerge(). |
*/ |
-static int docListPhraseMerge( |
- const char *pLeft, int nLeft, |
- const char *pRight, int nRight, |
- int nNear, /* 0 for a phrase merge, non-zero for a NEAR merge */ |
- int nPhrase, /* Number of tokens in left+right operands to NEAR */ |
- DocListType iType, /* Type of doclist to write to pOut */ |
- DataBuffer *pOut /* Write the combined doclist here */ |
+#define MERGE_NOT 2 /* D + D -> D */ |
+#define MERGE_AND 3 /* D + D -> D */ |
+#define MERGE_OR 4 /* D + D -> D */ |
+#define MERGE_POS_OR 5 /* P + P -> P */ |
+#define MERGE_PHRASE 6 /* P + P -> D */ |
+#define MERGE_POS_PHRASE 7 /* P + P -> P */ |
+#define MERGE_NEAR 8 /* P + P -> D */ |
+#define MERGE_POS_NEAR 9 /* P + P -> P */ |
+ |
+/* |
+** Merge the two doclists passed in buffer a1 (size n1 bytes) and a2 |
+** (size n2 bytes). The output is written to pre-allocated buffer aBuffer, |
+** which is guaranteed to be large enough to hold the results. The number |
+** of bytes written to aBuffer is stored in *pnBuffer before returning. |
+** |
+** If successful, SQLITE_OK is returned. Otherwise, if a malloc error |
+** occurs while allocating a temporary buffer as part of the merge operation, |
+** SQLITE_NOMEM is returned. |
+*/ |
+static int fts3DoclistMerge( |
+ int mergetype, /* One of the MERGE_XXX constants */ |
+ int nParam1, /* Used by MERGE_NEAR and MERGE_POS_NEAR */ |
+ int nParam2, /* Used by MERGE_NEAR and MERGE_POS_NEAR */ |
+ char *aBuffer, /* Pre-allocated output buffer */ |
+ int *pnBuffer, /* OUT: Bytes written to aBuffer */ |
+ char *a1, /* Buffer containing first doclist */ |
+ int n1, /* Size of buffer a1 */ |
+ char *a2, /* Buffer containing second doclist */ |
+ int n2, /* Size of buffer a2 */ |
+ int *pnDoc /* OUT: Number of docids in output */ |
){ |
- DLReader left, right; |
- DLWriter writer; |
- int rc; |
+ sqlite3_int64 i1 = 0; |
+ sqlite3_int64 i2 = 0; |
+ sqlite3_int64 iPrev = 0; |
+ |
+ char *p = aBuffer; |
+ char *p1 = a1; |
+ char *p2 = a2; |
+ char *pEnd1 = &a1[n1]; |
+ char *pEnd2 = &a2[n2]; |
+ int nDoc = 0; |
+ |
+ assert( mergetype==MERGE_OR || mergetype==MERGE_POS_OR |
+ || mergetype==MERGE_AND || mergetype==MERGE_NOT |
+ || mergetype==MERGE_PHRASE || mergetype==MERGE_POS_PHRASE |
+ || mergetype==MERGE_NEAR || mergetype==MERGE_POS_NEAR |
+ ); |
- /* These two buffers are used in the 'while', but are declared here |
- ** to simplify error-handling. |
- */ |
- DataBuffer one = {0, 0, 0}; |
- DataBuffer two = {0, 0, 0}; |
+ if( !aBuffer ){ |
+ *pnBuffer = 0; |
+ return SQLITE_NOMEM; |
+ } |
- if( nLeft==0 || nRight==0 ) return SQLITE_OK; |
+ /* Read the first docid from each doclist */ |
+ fts3GetDeltaVarint2(&p1, pEnd1, &i1); |
+ fts3GetDeltaVarint2(&p2, pEnd2, &i2); |
+ |
+ switch( mergetype ){ |
+ case MERGE_OR: |
+ case MERGE_POS_OR: |
+ while( p1 || p2 ){ |
+ if( p2 && p1 && i1==i2 ){ |
+ fts3PutDeltaVarint(&p, &iPrev, i1); |
+ if( mergetype==MERGE_POS_OR ) fts3PoslistMerge(&p, &p1, &p2); |
+ fts3GetDeltaVarint2(&p1, pEnd1, &i1); |
+ fts3GetDeltaVarint2(&p2, pEnd2, &i2); |
+ }else if( !p2 || (p1 && i1<i2) ){ |
+ fts3PutDeltaVarint(&p, &iPrev, i1); |
+ if( mergetype==MERGE_POS_OR ) fts3PoslistCopy(&p, &p1); |
+ fts3GetDeltaVarint2(&p1, pEnd1, &i1); |
+ }else{ |
+ fts3PutDeltaVarint(&p, &iPrev, i2); |
+ if( mergetype==MERGE_POS_OR ) fts3PoslistCopy(&p, &p2); |
+ fts3GetDeltaVarint2(&p2, pEnd2, &i2); |
+ } |
+ } |
+ break; |
- assert( iType!=DL_POSITIONS_OFFSETS ); |
+ case MERGE_AND: |
+ while( p1 && p2 ){ |
+ if( i1==i2 ){ |
+ fts3PutDeltaVarint(&p, &iPrev, i1); |
+ fts3GetDeltaVarint2(&p1, pEnd1, &i1); |
+ fts3GetDeltaVarint2(&p2, pEnd2, &i2); |
+ nDoc++; |
+ }else if( i1<i2 ){ |
+ fts3GetDeltaVarint2(&p1, pEnd1, &i1); |
+ }else{ |
+ fts3GetDeltaVarint2(&p2, pEnd2, &i2); |
+ } |
+ } |
+ break; |
- rc = dlrInit(&left, DL_POSITIONS, pLeft, nLeft); |
- if( rc!=SQLITE_OK ) return rc; |
- rc = dlrInit(&right, DL_POSITIONS, pRight, nRight); |
- if( rc!=SQLITE_OK ){ |
- dlrDestroy(&left); |
- return rc; |
- } |
- dlwInit(&writer, iType, pOut); |
- |
- while( !dlrAtEnd(&left) && !dlrAtEnd(&right) ){ |
- if( dlrDocid(&left)<dlrDocid(&right) ){ |
- rc = dlrStep(&left); |
- if( rc!=SQLITE_OK ) goto err; |
- }else if( dlrDocid(&right)<dlrDocid(&left) ){ |
- rc = dlrStep(&right); |
- if( rc!=SQLITE_OK ) goto err; |
- }else{ |
- if( nNear==0 ){ |
- rc = posListPhraseMerge(&left, &right, 0, 0, &writer); |
- if( rc!=SQLITE_OK ) goto err; |
- }else{ |
- /* This case occurs when two terms (simple terms or phrases) are |
- * connected by a NEAR operator, span (nNear+1). i.e. |
- * |
- * '"terrible company" NEAR widget' |
- */ |
- DLWriter dlwriter2; |
- DLReader dr1 = {0, 0, 0, 0, 0}; |
- DLReader dr2 = {0, 0, 0, 0, 0}; |
- |
- dlwInit(&dlwriter2, iType, &one); |
- rc = posListPhraseMerge(&right, &left, nNear-3+nPhrase, 1, &dlwriter2); |
- if( rc!=SQLITE_OK ) goto err; |
- dlwInit(&dlwriter2, iType, &two); |
- rc = posListPhraseMerge(&left, &right, nNear-1, 0, &dlwriter2); |
- if( rc!=SQLITE_OK ) goto err; |
- |
- if( one.nData){ |
- rc = dlrInit(&dr1, iType, one.pData, one.nData); |
- if( rc!=SQLITE_OK ) goto err; |
+ case MERGE_NOT: |
+ while( p1 ){ |
+ if( p2 && i1==i2 ){ |
+ fts3GetDeltaVarint2(&p1, pEnd1, &i1); |
+ fts3GetDeltaVarint2(&p2, pEnd2, &i2); |
+ }else if( !p2 || i1<i2 ){ |
+ fts3PutDeltaVarint(&p, &iPrev, i1); |
+ fts3GetDeltaVarint2(&p1, pEnd1, &i1); |
+ }else{ |
+ fts3GetDeltaVarint2(&p2, pEnd2, &i2); |
} |
- if( two.nData){ |
- rc = dlrInit(&dr2, iType, two.pData, two.nData); |
- if( rc!=SQLITE_OK ) goto err; |
+ } |
+ break; |
+ |
+ case MERGE_POS_PHRASE: |
+ case MERGE_PHRASE: { |
+ char **ppPos = (mergetype==MERGE_PHRASE ? 0 : &p); |
+ while( p1 && p2 ){ |
+ if( i1==i2 ){ |
+ char *pSave = p; |
+ sqlite3_int64 iPrevSave = iPrev; |
+ fts3PutDeltaVarint(&p, &iPrev, i1); |
+ if( 0==fts3PoslistPhraseMerge(ppPos, nParam1, 0, 1, &p1, &p2) ){ |
+ p = pSave; |
+ iPrev = iPrevSave; |
+ }else{ |
+ nDoc++; |
+ } |
+ fts3GetDeltaVarint2(&p1, pEnd1, &i1); |
+ fts3GetDeltaVarint2(&p2, pEnd2, &i2); |
+ }else if( i1<i2 ){ |
+ fts3PoslistCopy(0, &p1); |
+ fts3GetDeltaVarint2(&p1, pEnd1, &i1); |
+ }else{ |
+ fts3PoslistCopy(0, &p2); |
+ fts3GetDeltaVarint2(&p2, pEnd2, &i2); |
} |
+ } |
+ break; |
+ } |
- if( !dlrAtEnd(&dr1) || !dlrAtEnd(&dr2) ){ |
- PLReader pr1 = {0}; |
- PLReader pr2 = {0}; |
+ default: assert( mergetype==MERGE_POS_NEAR || mergetype==MERGE_NEAR ); { |
+ char *aTmp = 0; |
+ char **ppPos = 0; |
- PLWriter plwriter; |
- plwInit(&plwriter, &writer, dlrDocid(dlrAtEnd(&dr1)?&dr2:&dr1)); |
+ if( mergetype==MERGE_POS_NEAR ){ |
+ ppPos = &p; |
+ aTmp = sqlite3_malloc(2*(n1+n2+1)); |
+ if( !aTmp ){ |
+ return SQLITE_NOMEM; |
+ } |
+ } |
- if( one.nData ){ |
- rc = plrInit(&pr1, &dr1); |
- if( rc!=SQLITE_OK ) goto err; |
- } |
- if( two.nData ){ |
- rc = plrInit(&pr2, &dr2); |
- if( rc!=SQLITE_OK ) goto err; |
- } |
- while( !plrAtEnd(&pr1) || !plrAtEnd(&pr2) ){ |
- int iCompare = plrCompare(&pr1, &pr2); |
- switch( iCompare ){ |
- case -1: |
- plwCopy(&plwriter, &pr1); |
- rc = plrStep(&pr1); |
- if( rc!=SQLITE_OK ) goto err; |
- break; |
- case 1: |
- plwCopy(&plwriter, &pr2); |
- rc = plrStep(&pr2); |
- if( rc!=SQLITE_OK ) goto err; |
- break; |
- case 0: |
- plwCopy(&plwriter, &pr1); |
- rc = plrStep(&pr1); |
- if( rc!=SQLITE_OK ) goto err; |
- rc = plrStep(&pr2); |
- if( rc!=SQLITE_OK ) goto err; |
- break; |
- } |
+ while( p1 && p2 ){ |
+ if( i1==i2 ){ |
+ char *pSave = p; |
+ sqlite3_int64 iPrevSave = iPrev; |
+ fts3PutDeltaVarint(&p, &iPrev, i1); |
+ |
+ if( !fts3PoslistNearMerge(ppPos, aTmp, nParam1, nParam2, &p1, &p2) ){ |
+ iPrev = iPrevSave; |
+ p = pSave; |
} |
- plwTerminate(&plwriter); |
+ |
+ fts3GetDeltaVarint2(&p1, pEnd1, &i1); |
+ fts3GetDeltaVarint2(&p2, pEnd2, &i2); |
+ }else if( i1<i2 ){ |
+ fts3PoslistCopy(0, &p1); |
+ fts3GetDeltaVarint2(&p1, pEnd1, &i1); |
+ }else{ |
+ fts3PoslistCopy(0, &p2); |
+ fts3GetDeltaVarint2(&p2, pEnd2, &i2); |
} |
- dataBufferReset(&one); |
- dataBufferReset(&two); |
} |
- rc = dlrStep(&left); |
- if( rc!=SQLITE_OK ) goto err; |
- rc = dlrStep(&right); |
- if( rc!=SQLITE_OK ) goto err; |
+ sqlite3_free(aTmp); |
+ break; |
} |
} |
-err: |
- dataBufferDestroy(&one); |
- dataBufferDestroy(&two); |
- dlrDestroy(&left); |
- dlrDestroy(&right); |
- dlwDestroy(&writer); |
- return rc; |
+ if( pnDoc ) *pnDoc = nDoc; |
+ *pnBuffer = (int)(p-aBuffer); |
+ return SQLITE_OK; |
} |
-/* We have two DL_DOCIDS doclists: pLeft and pRight. |
-** Write the intersection of these two doclists into pOut as a |
-** DL_DOCIDS doclist. |
-*/ |
-static int docListAndMerge( |
- const char *pLeft, int nLeft, |
- const char *pRight, int nRight, |
- DataBuffer *pOut /* Write the combined doclist here */ |
-){ |
- DLReader left, right; |
- DLWriter writer; |
- int rc; |
+/* |
+** A pointer to an instance of this structure is used as the context |
+** argument to sqlite3Fts3SegReaderIterate() |
+*/ |
+typedef struct TermSelect TermSelect; |
+struct TermSelect { |
+ int isReqPos; |
+ char *aaOutput[16]; /* Malloc'd output buffer */ |
+ int anOutput[16]; /* Size of output in bytes */ |
+}; |
- if( nLeft==0 || nRight==0 ) return SQLITE_OK; |
+/* |
+** Merge all doclists in the TermSelect.aaOutput[] array into a single |
+** doclist stored in TermSelect.aaOutput[0]. If successful, delete all |
+** other doclists (except the aaOutput[0] one) and return SQLITE_OK. |
+** |
+** If an OOM error occurs, return SQLITE_NOMEM. In this case it is |
+** the responsibility of the caller to free any doclists left in the |
+** TermSelect.aaOutput[] array. |
+*/ |
+static int fts3TermSelectMerge(TermSelect *pTS){ |
+ int mergetype = (pTS->isReqPos ? MERGE_POS_OR : MERGE_OR); |
+ char *aOut = 0; |
+ int nOut = 0; |
+ int i; |
- rc = dlrInit(&left, DL_DOCIDS, pLeft, nLeft); |
- if( rc!=SQLITE_OK ) return rc; |
- rc = dlrInit(&right, DL_DOCIDS, pRight, nRight); |
- if( rc!=SQLITE_OK ){ |
- dlrDestroy(&left); |
- return rc; |
- } |
- dlwInit(&writer, DL_DOCIDS, pOut); |
- |
- while( !dlrAtEnd(&left) && !dlrAtEnd(&right) ){ |
- if( dlrDocid(&left)<dlrDocid(&right) ){ |
- rc = dlrStep(&left); |
- if( rc!=SQLITE_OK ) break; |
- }else if( dlrDocid(&right)<dlrDocid(&left) ){ |
- rc = dlrStep(&right); |
- if( rc!=SQLITE_OK ) break; |
- }else{ |
- dlwAdd(&writer, dlrDocid(&left)); |
- rc = dlrStep(&left); |
- if( rc!=SQLITE_OK ) break; |
- rc = dlrStep(&right); |
- if( rc!=SQLITE_OK ) break; |
+ /* Loop through the doclists in the aaOutput[] array. Merge them all |
+ ** into a single doclist. |
+ */ |
+ for(i=0; i<SizeofArray(pTS->aaOutput); i++){ |
+ if( pTS->aaOutput[i] ){ |
+ if( !aOut ){ |
+ aOut = pTS->aaOutput[i]; |
+ nOut = pTS->anOutput[i]; |
+ pTS->aaOutput[i] = 0; |
+ }else{ |
+ int nNew = nOut + pTS->anOutput[i]; |
+ char *aNew = sqlite3_malloc(nNew); |
+ if( !aNew ){ |
+ sqlite3_free(aOut); |
+ return SQLITE_NOMEM; |
+ } |
+ fts3DoclistMerge(mergetype, 0, 0, |
+ aNew, &nNew, pTS->aaOutput[i], pTS->anOutput[i], aOut, nOut, 0 |
+ ); |
+ sqlite3_free(pTS->aaOutput[i]); |
+ sqlite3_free(aOut); |
+ pTS->aaOutput[i] = 0; |
+ aOut = aNew; |
+ nOut = nNew; |
+ } |
} |
} |
- dlrDestroy(&left); |
- dlrDestroy(&right); |
- dlwDestroy(&writer); |
- return rc; |
+ pTS->aaOutput[0] = aOut; |
+ pTS->anOutput[0] = nOut; |
+ return SQLITE_OK; |
} |
-/* We have two DL_DOCIDS doclists: pLeft and pRight. |
-** Write the union of these two doclists into pOut as a |
-** DL_DOCIDS doclist. |
-*/ |
-static int docListOrMerge( |
- const char *pLeft, int nLeft, |
- const char *pRight, int nRight, |
- DataBuffer *pOut /* Write the combined doclist here */ |
+/* |
+** This function is used as the sqlite3Fts3SegReaderIterate() callback when |
+** querying the full-text index for a doclist associated with a term or |
+** term-prefix. |
+*/ |
+static int fts3TermSelectCb( |
+ Fts3Table *p, /* Virtual table object */ |
+ void *pContext, /* Pointer to TermSelect structure */ |
+ char *zTerm, |
+ int nTerm, |
+ char *aDoclist, |
+ int nDoclist |
){ |
- DLReader left, right; |
- DLWriter writer; |
- int rc; |
+ TermSelect *pTS = (TermSelect *)pContext; |
- if( nLeft==0 ){ |
- if( nRight!=0 ) dataBufferAppend(pOut, pRight, nRight); |
- return SQLITE_OK; |
- } |
- if( nRight==0 ){ |
- dataBufferAppend(pOut, pLeft, nLeft); |
- return SQLITE_OK; |
- } |
+ UNUSED_PARAMETER(p); |
+ UNUSED_PARAMETER(zTerm); |
+ UNUSED_PARAMETER(nTerm); |
- rc = dlrInit(&left, DL_DOCIDS, pLeft, nLeft); |
- if( rc!=SQLITE_OK ) return rc; |
- rc = dlrInit(&right, DL_DOCIDS, pRight, nRight); |
- if( rc!=SQLITE_OK ){ |
- dlrDestroy(&left); |
- return rc; |
- } |
- dlwInit(&writer, DL_DOCIDS, pOut); |
- |
- while( !dlrAtEnd(&left) || !dlrAtEnd(&right) ){ |
- if( dlrAtEnd(&right) ){ |
- dlwAdd(&writer, dlrDocid(&left)); |
- rc = dlrStep(&left); |
- if( rc!=SQLITE_OK ) break; |
- }else if( dlrAtEnd(&left) ){ |
- dlwAdd(&writer, dlrDocid(&right)); |
- rc = dlrStep(&right); |
- if( rc!=SQLITE_OK ) break; |
- }else if( dlrDocid(&left)<dlrDocid(&right) ){ |
- dlwAdd(&writer, dlrDocid(&left)); |
- rc = dlrStep(&left); |
- if( rc!=SQLITE_OK ) break; |
- }else if( dlrDocid(&right)<dlrDocid(&left) ){ |
- dlwAdd(&writer, dlrDocid(&right)); |
- rc = dlrStep(&right); |
- if( rc!=SQLITE_OK ) break; |
+ if( pTS->aaOutput[0]==0 ){ |
+ /* If this is the first term selected, copy the doclist to the output |
+ ** buffer using memcpy(). TODO: Add a way to transfer control of the |
+ ** aDoclist buffer from the caller so as to avoid the memcpy(). |
+ */ |
+ pTS->aaOutput[0] = sqlite3_malloc(nDoclist); |
+ pTS->anOutput[0] = nDoclist; |
+ if( pTS->aaOutput[0] ){ |
+ memcpy(pTS->aaOutput[0], aDoclist, nDoclist); |
}else{ |
- dlwAdd(&writer, dlrDocid(&left)); |
- rc = dlrStep(&left); |
- if( rc!=SQLITE_OK ) break; |
- rc = dlrStep(&right); |
- if( rc!=SQLITE_OK ) break; |
- } |
- } |
- |
- dlrDestroy(&left); |
- dlrDestroy(&right); |
- dlwDestroy(&writer); |
- return rc; |
-} |
- |
-/* We have two DL_DOCIDS doclists: pLeft and pRight. |
-** Write into pOut as DL_DOCIDS doclist containing all documents that |
-** occur in pLeft but not in pRight. |
-*/ |
-static int docListExceptMerge( |
- const char *pLeft, int nLeft, |
- const char *pRight, int nRight, |
- DataBuffer *pOut /* Write the combined doclist here */ |
-){ |
- DLReader left, right; |
- DLWriter writer; |
- int rc; |
- |
- if( nLeft==0 ) return SQLITE_OK; |
- if( nRight==0 ){ |
- dataBufferAppend(pOut, pLeft, nLeft); |
- return SQLITE_OK; |
- } |
- |
- rc = dlrInit(&left, DL_DOCIDS, pLeft, nLeft); |
- if( rc!=SQLITE_OK ) return rc; |
- rc = dlrInit(&right, DL_DOCIDS, pRight, nRight); |
- if( rc!=SQLITE_OK ){ |
- dlrDestroy(&left); |
- return rc; |
- } |
- dlwInit(&writer, DL_DOCIDS, pOut); |
- |
- while( !dlrAtEnd(&left) ){ |
- while( !dlrAtEnd(&right) && dlrDocid(&right)<dlrDocid(&left) ){ |
- rc = dlrStep(&right); |
- if( rc!=SQLITE_OK ) goto err; |
- } |
- if( dlrAtEnd(&right) || dlrDocid(&left)<dlrDocid(&right) ){ |
- dlwAdd(&writer, dlrDocid(&left)); |
- } |
- rc = dlrStep(&left); |
- if( rc!=SQLITE_OK ) break; |
- } |
- |
-err: |
- dlrDestroy(&left); |
- dlrDestroy(&right); |
- dlwDestroy(&writer); |
- return rc; |
-} |
- |
-static char *string_dup_n(const char *s, int n){ |
- char *str = sqlite3_malloc(n + 1); |
- memcpy(str, s, n); |
- str[n] = '\0'; |
- return str; |
-} |
- |
-/* Duplicate a string; the caller must free() the returned string. |
- * (We don't use strdup() since it is not part of the standard C library and |
- * may not be available everywhere.) */ |
-static char *string_dup(const char *s){ |
- return string_dup_n(s, strlen(s)); |
-} |
- |
-/* Format a string, replacing each occurrence of the % character with |
- * zDb.zName. This may be more convenient than sqlite_mprintf() |
- * when one string is used repeatedly in a format string. |
- * The caller must free() the returned string. */ |
-static char *string_format(const char *zFormat, |
- const char *zDb, const char *zName){ |
- const char *p; |
- size_t len = 0; |
- size_t nDb = strlen(zDb); |
- size_t nName = strlen(zName); |
- size_t nFullTableName = nDb+1+nName; |
- char *result; |
- char *r; |
- |
- /* first compute length needed */ |
- for(p = zFormat ; *p ; ++p){ |
- len += (*p=='%' ? nFullTableName : 1); |
- } |
- len += 1; /* for null terminator */ |
- |
- r = result = sqlite3_malloc(len); |
- for(p = zFormat; *p; ++p){ |
- if( *p=='%' ){ |
- memcpy(r, zDb, nDb); |
- r += nDb; |
- *r++ = '.'; |
- memcpy(r, zName, nName); |
- r += nName; |
- } else { |
- *r++ = *p; |
- } |
- } |
- *r++ = '\0'; |
- assert( r == result + len ); |
- return result; |
-} |
- |
-static int sql_exec(sqlite3 *db, const char *zDb, const char *zName, |
- const char *zFormat){ |
- char *zCommand = string_format(zFormat, zDb, zName); |
- int rc; |
- FTSTRACE(("FTS3 sql: %s\n", zCommand)); |
- rc = sqlite3_exec(db, zCommand, NULL, 0, NULL); |
- sqlite3_free(zCommand); |
- return rc; |
-} |
- |
-static int sql_prepare(sqlite3 *db, const char *zDb, const char *zName, |
- sqlite3_stmt **ppStmt, const char *zFormat){ |
- char *zCommand = string_format(zFormat, zDb, zName); |
- int rc; |
- FTSTRACE(("FTS3 prepare: %s\n", zCommand)); |
- rc = sqlite3_prepare_v2(db, zCommand, -1, ppStmt, NULL); |
- sqlite3_free(zCommand); |
- return rc; |
-} |
- |
-/* end utility functions */ |
- |
-/* Forward reference */ |
-typedef struct fulltext_vtab fulltext_vtab; |
- |
-/* |
-** An instance of the following structure keeps track of generated |
-** matching-word offset information and snippets. |
-*/ |
-typedef struct Snippet { |
- int nMatch; /* Total number of matches */ |
- int nAlloc; /* Space allocated for aMatch[] */ |
- struct snippetMatch { /* One entry for each matching term */ |
- char snStatus; /* Status flag for use while constructing snippets */ |
- short int iCol; /* The column that contains the match */ |
- short int iTerm; /* The index in Query.pTerms[] of the matching term */ |
- int iToken; /* The index of the matching document token */ |
- short int nByte; /* Number of bytes in the term */ |
- int iStart; /* The offset to the first character of the term */ |
- } *aMatch; /* Points to space obtained from malloc */ |
- char *zOffset; /* Text rendering of aMatch[] */ |
- int nOffset; /* strlen(zOffset) */ |
- char *zSnippet; /* Snippet text */ |
- int nSnippet; /* strlen(zSnippet) */ |
-} Snippet; |
- |
- |
-typedef enum QueryType { |
- QUERY_GENERIC, /* table scan */ |
- QUERY_DOCID, /* lookup by docid */ |
- QUERY_FULLTEXT /* QUERY_FULLTEXT + [i] is a full-text search for column i*/ |
-} QueryType; |
- |
-typedef enum fulltext_statement { |
- CONTENT_INSERT_STMT, |
- CONTENT_SELECT_STMT, |
- CONTENT_UPDATE_STMT, |
- CONTENT_DELETE_STMT, |
- CONTENT_EXISTS_STMT, |
- |
- BLOCK_INSERT_STMT, |
- BLOCK_SELECT_STMT, |
- BLOCK_DELETE_STMT, |
- BLOCK_DELETE_ALL_STMT, |
- |
- SEGDIR_MAX_INDEX_STMT, |
- SEGDIR_SET_STMT, |
- SEGDIR_SELECT_LEVEL_STMT, |
- SEGDIR_SPAN_STMT, |
- SEGDIR_DELETE_STMT, |
- SEGDIR_SELECT_SEGMENT_STMT, |
- SEGDIR_SELECT_ALL_STMT, |
- SEGDIR_DELETE_ALL_STMT, |
- SEGDIR_COUNT_STMT, |
- |
- MAX_STMT /* Always at end! */ |
-} fulltext_statement; |
- |
-/* These must exactly match the enum above. */ |
-/* TODO(shess): Is there some risk that a statement will be used in two |
-** cursors at once, e.g. if a query joins a virtual table to itself? |
-** If so perhaps we should move some of these to the cursor object. |
-*/ |
-static const char *const fulltext_zStatement[MAX_STMT] = { |
- /* CONTENT_INSERT */ NULL, /* generated in contentInsertStatement() */ |
- /* CONTENT_SELECT */ NULL, /* generated in contentSelectStatement() */ |
- /* CONTENT_UPDATE */ NULL, /* generated in contentUpdateStatement() */ |
- /* CONTENT_DELETE */ "delete from %_content where docid = ?", |
- /* CONTENT_EXISTS */ "select docid from %_content limit 1", |
- |
- /* BLOCK_INSERT */ |
- "insert into %_segments (blockid, block) values (null, ?)", |
- /* BLOCK_SELECT */ "select block from %_segments where blockid = ?", |
- /* BLOCK_DELETE */ "delete from %_segments where blockid between ? and ?", |
- /* BLOCK_DELETE_ALL */ "delete from %_segments", |
- |
- /* SEGDIR_MAX_INDEX */ "select max(idx) from %_segdir where level = ?", |
- /* SEGDIR_SET */ "insert into %_segdir values (?, ?, ?, ?, ?, ?)", |
- /* SEGDIR_SELECT_LEVEL */ |
- "select start_block, leaves_end_block, root, idx from %_segdir " |
- " where level = ? order by idx", |
- /* SEGDIR_SPAN */ |
- "select min(start_block), max(end_block) from %_segdir " |
- " where level = ? and start_block <> 0", |
- /* SEGDIR_DELETE */ "delete from %_segdir where level = ?", |
- |
- /* NOTE(shess): The first three results of the following two |
- ** statements must match. |
- */ |
- /* SEGDIR_SELECT_SEGMENT */ |
- "select start_block, leaves_end_block, root from %_segdir " |
- " where level = ? and idx = ?", |
- /* SEGDIR_SELECT_ALL */ |
- "select start_block, leaves_end_block, root from %_segdir " |
- " order by level desc, idx asc", |
- /* SEGDIR_DELETE_ALL */ "delete from %_segdir", |
- /* SEGDIR_COUNT */ "select count(*), ifnull(max(level),0) from %_segdir", |
-}; |
- |
-/* |
-** A connection to a fulltext index is an instance of the following |
-** structure. The xCreate and xConnect methods create an instance |
-** of this structure and xDestroy and xDisconnect free that instance. |
-** All other methods receive a pointer to the structure as one of their |
-** arguments. |
-*/ |
-struct fulltext_vtab { |
- sqlite3_vtab base; /* Base class used by SQLite core */ |
- sqlite3 *db; /* The database connection */ |
- const char *zDb; /* logical database name */ |
- const char *zName; /* virtual table name */ |
- int nColumn; /* number of columns in virtual table */ |
- char **azColumn; /* column names. malloced */ |
- char **azContentColumn; /* column names in content table; malloced */ |
- sqlite3_tokenizer *pTokenizer; /* tokenizer for inserts and queries */ |
- |
- /* Precompiled statements which we keep as long as the table is |
- ** open. |
- */ |
- sqlite3_stmt *pFulltextStatements[MAX_STMT]; |
- |
- /* Precompiled statements used for segment merges. We run a |
- ** separate select across the leaf level of each tree being merged. |
- */ |
- sqlite3_stmt *pLeafSelectStmts[MERGE_COUNT]; |
- /* The statement used to prepare pLeafSelectStmts. */ |
-#define LEAF_SELECT \ |
- "select block from %_segments where blockid between ? and ? order by blockid" |
- |
- /* These buffer pending index updates during transactions. |
- ** nPendingData estimates the memory size of the pending data. It |
- ** doesn't include the hash-bucket overhead, nor any malloc |
- ** overhead. When nPendingData exceeds kPendingThreshold, the |
- ** buffer is flushed even before the transaction closes. |
- ** pendingTerms stores the data, and is only valid when nPendingData |
- ** is >=0 (nPendingData<0 means pendingTerms has not been |
- ** initialized). iPrevDocid is the last docid written, used to make |
- ** certain we're inserting in sorted order. |
- */ |
- int nPendingData; |
-#define kPendingThreshold (1*1024*1024) |
- sqlite_int64 iPrevDocid; |
- fts3Hash pendingTerms; |
-}; |
- |
-/* |
-** When the core wants to do a query, it create a cursor using a |
-** call to xOpen. This structure is an instance of a cursor. It |
-** is destroyed by xClose. |
-*/ |
-typedef struct fulltext_cursor { |
- sqlite3_vtab_cursor base; /* Base class used by SQLite core */ |
- QueryType iCursorType; /* Copy of sqlite3_index_info.idxNum */ |
- sqlite3_stmt *pStmt; /* Prepared statement in use by the cursor */ |
- int eof; /* True if at End Of Results */ |
- Fts3Expr *pExpr; /* Parsed MATCH query string */ |
- Snippet snippet; /* Cached snippet for the current row */ |
- int iColumn; /* Column being searched */ |
- DataBuffer result; /* Doclist results from fulltextQuery */ |
- DLReader reader; /* Result reader if result not empty */ |
-} fulltext_cursor; |
- |
-static fulltext_vtab *cursor_vtab(fulltext_cursor *c){ |
- return (fulltext_vtab *) c->base.pVtab; |
-} |
- |
-static const sqlite3_module fts3Module; /* forward declaration */ |
- |
-/* Return a dynamically generated statement of the form |
- * insert into %_content (docid, ...) values (?, ...) |
- */ |
-static const char *contentInsertStatement(fulltext_vtab *v){ |
- StringBuffer sb; |
- int i; |
- |
- initStringBuffer(&sb); |
- append(&sb, "insert into %_content (docid, "); |
- appendList(&sb, v->nColumn, v->azContentColumn); |
- append(&sb, ") values (?"); |
- for(i=0; i<v->nColumn; ++i) |
- append(&sb, ", ?"); |
- append(&sb, ")"); |
- return stringBufferData(&sb); |
-} |
- |
-/* Return a dynamically generated statement of the form |
- * select <content columns> from %_content where docid = ? |
- */ |
-static const char *contentSelectStatement(fulltext_vtab *v){ |
- StringBuffer sb; |
- initStringBuffer(&sb); |
- append(&sb, "SELECT "); |
- appendList(&sb, v->nColumn, v->azContentColumn); |
- append(&sb, " FROM %_content WHERE docid = ?"); |
- return stringBufferData(&sb); |
-} |
- |
-/* Return a dynamically generated statement of the form |
- * update %_content set [col_0] = ?, [col_1] = ?, ... |
- * where docid = ? |
- */ |
-static const char *contentUpdateStatement(fulltext_vtab *v){ |
- StringBuffer sb; |
- int i; |
- |
- initStringBuffer(&sb); |
- append(&sb, "update %_content set "); |
- for(i=0; i<v->nColumn; ++i) { |
- if( i>0 ){ |
- append(&sb, ", "); |
- } |
- append(&sb, v->azContentColumn[i]); |
- append(&sb, " = ?"); |
- } |
- append(&sb, " where docid = ?"); |
- return stringBufferData(&sb); |
-} |
- |
-/* Puts a freshly-prepared statement determined by iStmt in *ppStmt. |
-** If the indicated statement has never been prepared, it is prepared |
-** and cached, otherwise the cached version is reset. |
-*/ |
-static int sql_get_statement(fulltext_vtab *v, fulltext_statement iStmt, |
- sqlite3_stmt **ppStmt){ |
- assert( iStmt<MAX_STMT ); |
- if( v->pFulltextStatements[iStmt]==NULL ){ |
- const char *zStmt; |
- int rc; |
- switch( iStmt ){ |
- case CONTENT_INSERT_STMT: |
- zStmt = contentInsertStatement(v); break; |
- case CONTENT_SELECT_STMT: |
- zStmt = contentSelectStatement(v); break; |
- case CONTENT_UPDATE_STMT: |
- zStmt = contentUpdateStatement(v); break; |
- default: |
- zStmt = fulltext_zStatement[iStmt]; |
- } |
- rc = sql_prepare(v->db, v->zDb, v->zName, &v->pFulltextStatements[iStmt], |
- zStmt); |
- if( zStmt != fulltext_zStatement[iStmt]) sqlite3_free((void *) zStmt); |
- if( rc!=SQLITE_OK ) return rc; |
- } else { |
- int rc = sqlite3_reset(v->pFulltextStatements[iStmt]); |
- if( rc!=SQLITE_OK ) return rc; |
- } |
- |
- *ppStmt = v->pFulltextStatements[iStmt]; |
- return SQLITE_OK; |
-} |
- |
-/* Like sqlite3_step(), but convert SQLITE_DONE to SQLITE_OK and |
-** SQLITE_ROW to SQLITE_ERROR. Useful for statements like UPDATE, |
-** where we expect no results. |
-*/ |
-static int sql_single_step(sqlite3_stmt *s){ |
- int rc = sqlite3_step(s); |
- return (rc==SQLITE_DONE) ? SQLITE_OK : rc; |
-} |
- |
-/* Like sql_get_statement(), but for special replicated LEAF_SELECT |
-** statements. idx -1 is a special case for an uncached version of |
-** the statement (used in the optimize implementation). |
-*/ |
-/* TODO(shess) Write version for generic statements and then share |
-** that between the cached-statement functions. |
-*/ |
-static int sql_get_leaf_statement(fulltext_vtab *v, int idx, |
- sqlite3_stmt **ppStmt){ |
- assert( idx>=-1 && idx<MERGE_COUNT ); |
- if( idx==-1 ){ |
- return sql_prepare(v->db, v->zDb, v->zName, ppStmt, LEAF_SELECT); |
- }else if( v->pLeafSelectStmts[idx]==NULL ){ |
- int rc = sql_prepare(v->db, v->zDb, v->zName, &v->pLeafSelectStmts[idx], |
- LEAF_SELECT); |
- if( rc!=SQLITE_OK ) return rc; |
- }else{ |
- int rc = sqlite3_reset(v->pLeafSelectStmts[idx]); |
- if( rc!=SQLITE_OK ) return rc; |
- } |
- |
- *ppStmt = v->pLeafSelectStmts[idx]; |
- return SQLITE_OK; |
-} |
- |
-/* insert into %_content (docid, ...) values ([docid], [pValues]) |
-** If the docid contains SQL NULL, then a unique docid will be |
-** generated. |
-*/ |
-static int content_insert(fulltext_vtab *v, sqlite3_value *docid, |
- sqlite3_value **pValues){ |
- sqlite3_stmt *s; |
- int i; |
- int rc = sql_get_statement(v, CONTENT_INSERT_STMT, &s); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- rc = sqlite3_bind_value(s, 1, docid); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- for(i=0; i<v->nColumn; ++i){ |
- rc = sqlite3_bind_value(s, 2+i, pValues[i]); |
- if( rc!=SQLITE_OK ) return rc; |
- } |
- |
- return sql_single_step(s); |
-} |
- |
-/* update %_content set col0 = pValues[0], col1 = pValues[1], ... |
- * where docid = [iDocid] */ |
-static int content_update(fulltext_vtab *v, sqlite3_value **pValues, |
- sqlite_int64 iDocid){ |
- sqlite3_stmt *s; |
- int i; |
- int rc = sql_get_statement(v, CONTENT_UPDATE_STMT, &s); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- for(i=0; i<v->nColumn; ++i){ |
- rc = sqlite3_bind_value(s, 1+i, pValues[i]); |
- if( rc!=SQLITE_OK ) return rc; |
- } |
- |
- rc = sqlite3_bind_int64(s, 1+v->nColumn, iDocid); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- return sql_single_step(s); |
-} |
- |
-static void freeStringArray(int nString, const char **pString){ |
- int i; |
- |
- for (i=0 ; i < nString ; ++i) { |
- if( pString[i]!=NULL ) sqlite3_free((void *) pString[i]); |
- } |
- sqlite3_free((void *) pString); |
-} |
- |
-/* select * from %_content where docid = [iDocid] |
- * The caller must delete the returned array and all strings in it. |
- * null fields will be NULL in the returned array. |
- * |
- * TODO: Perhaps we should return pointer/length strings here for consistency |
- * with other code which uses pointer/length. */ |
-static int content_select(fulltext_vtab *v, sqlite_int64 iDocid, |
- const char ***pValues){ |
- sqlite3_stmt *s; |
- const char **values; |
- int i; |
- int rc; |
- |
- *pValues = NULL; |
- |
- rc = sql_get_statement(v, CONTENT_SELECT_STMT, &s); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- rc = sqlite3_bind_int64(s, 1, iDocid); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- rc = sqlite3_step(s); |
- if( rc!=SQLITE_ROW ) return rc; |
- |
- values = (const char **) sqlite3_malloc(v->nColumn * sizeof(const char *)); |
- for(i=0; i<v->nColumn; ++i){ |
- if( sqlite3_column_type(s, i)==SQLITE_NULL ){ |
- values[i] = NULL; |
- }else{ |
- values[i] = string_dup((char*)sqlite3_column_text(s, i)); |
- } |
- } |
- |
- /* We expect only one row. We must execute another sqlite3_step() |
- * to complete the iteration; otherwise the table will remain locked. */ |
- rc = sqlite3_step(s); |
- if( rc==SQLITE_DONE ){ |
- *pValues = values; |
- return SQLITE_OK; |
- } |
- |
- freeStringArray(v->nColumn, values); |
- return rc; |
-} |
- |
-/* delete from %_content where docid = [iDocid ] */ |
-static int content_delete(fulltext_vtab *v, sqlite_int64 iDocid){ |
- sqlite3_stmt *s; |
- int rc = sql_get_statement(v, CONTENT_DELETE_STMT, &s); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- rc = sqlite3_bind_int64(s, 1, iDocid); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- return sql_single_step(s); |
-} |
- |
-/* Returns SQLITE_ROW if any rows exist in %_content, SQLITE_DONE if |
-** no rows exist, and any error in case of failure. |
-*/ |
-static int content_exists(fulltext_vtab *v){ |
- sqlite3_stmt *s; |
- int rc = sql_get_statement(v, CONTENT_EXISTS_STMT, &s); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- rc = sqlite3_step(s); |
- if( rc!=SQLITE_ROW ) return rc; |
- |
- /* We expect only one row. We must execute another sqlite3_step() |
- * to complete the iteration; otherwise the table will remain locked. */ |
- rc = sqlite3_step(s); |
- if( rc==SQLITE_DONE ) return SQLITE_ROW; |
- if( rc==SQLITE_ROW ) return SQLITE_ERROR; |
- return rc; |
-} |
- |
-/* insert into %_segments values ([pData]) |
-** returns assigned blockid in *piBlockid |
-*/ |
-static int block_insert(fulltext_vtab *v, const char *pData, int nData, |
- sqlite_int64 *piBlockid){ |
- sqlite3_stmt *s; |
- int rc = sql_get_statement(v, BLOCK_INSERT_STMT, &s); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- rc = sqlite3_bind_blob(s, 1, pData, nData, SQLITE_STATIC); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- rc = sqlite3_step(s); |
- if( rc==SQLITE_ROW ) return SQLITE_ERROR; |
- if( rc!=SQLITE_DONE ) return rc; |
- |
- /* blockid column is an alias for rowid. */ |
- *piBlockid = sqlite3_last_insert_rowid(v->db); |
- return SQLITE_OK; |
-} |
- |
-/* delete from %_segments |
-** where blockid between [iStartBlockid] and [iEndBlockid] |
-** |
-** Deletes the range of blocks, inclusive, used to delete the blocks |
-** which form a segment. |
-*/ |
-static int block_delete(fulltext_vtab *v, |
- sqlite_int64 iStartBlockid, sqlite_int64 iEndBlockid){ |
- sqlite3_stmt *s; |
- int rc = sql_get_statement(v, BLOCK_DELETE_STMT, &s); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- rc = sqlite3_bind_int64(s, 1, iStartBlockid); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- rc = sqlite3_bind_int64(s, 2, iEndBlockid); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- return sql_single_step(s); |
-} |
- |
-/* Returns SQLITE_ROW with *pidx set to the maximum segment idx found |
-** at iLevel. Returns SQLITE_DONE if there are no segments at |
-** iLevel. Otherwise returns an error. |
-*/ |
-static int segdir_max_index(fulltext_vtab *v, int iLevel, int *pidx){ |
- sqlite3_stmt *s; |
- int rc = sql_get_statement(v, SEGDIR_MAX_INDEX_STMT, &s); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- rc = sqlite3_bind_int(s, 1, iLevel); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- rc = sqlite3_step(s); |
- /* Should always get at least one row due to how max() works. */ |
- if( rc==SQLITE_DONE ) return SQLITE_DONE; |
- if( rc!=SQLITE_ROW ) return rc; |
- |
- /* NULL means that there were no inputs to max(). */ |
- if( SQLITE_NULL==sqlite3_column_type(s, 0) ){ |
- rc = sqlite3_step(s); |
- if( rc==SQLITE_ROW ) return SQLITE_ERROR; |
- return rc; |
- } |
- |
- *pidx = sqlite3_column_int(s, 0); |
- |
- /* We expect only one row. We must execute another sqlite3_step() |
- * to complete the iteration; otherwise the table will remain locked. */ |
- rc = sqlite3_step(s); |
- if( rc==SQLITE_ROW ) return SQLITE_ERROR; |
- if( rc!=SQLITE_DONE ) return rc; |
- return SQLITE_ROW; |
-} |
- |
-/* insert into %_segdir values ( |
-** [iLevel], [idx], |
-** [iStartBlockid], [iLeavesEndBlockid], [iEndBlockid], |
-** [pRootData] |
-** ) |
-*/ |
-static int segdir_set(fulltext_vtab *v, int iLevel, int idx, |
- sqlite_int64 iStartBlockid, |
- sqlite_int64 iLeavesEndBlockid, |
- sqlite_int64 iEndBlockid, |
- const char *pRootData, int nRootData){ |
- sqlite3_stmt *s; |
- int rc = sql_get_statement(v, SEGDIR_SET_STMT, &s); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- rc = sqlite3_bind_int(s, 1, iLevel); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- rc = sqlite3_bind_int(s, 2, idx); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- rc = sqlite3_bind_int64(s, 3, iStartBlockid); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- rc = sqlite3_bind_int64(s, 4, iLeavesEndBlockid); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- rc = sqlite3_bind_int64(s, 5, iEndBlockid); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- rc = sqlite3_bind_blob(s, 6, pRootData, nRootData, SQLITE_STATIC); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- return sql_single_step(s); |
-} |
- |
-/* Queries %_segdir for the block span of the segments in level |
-** iLevel. Returns SQLITE_DONE if there are no blocks for iLevel, |
-** SQLITE_ROW if there are blocks, else an error. |
-*/ |
-static int segdir_span(fulltext_vtab *v, int iLevel, |
- sqlite_int64 *piStartBlockid, |
- sqlite_int64 *piEndBlockid){ |
- sqlite3_stmt *s; |
- int rc = sql_get_statement(v, SEGDIR_SPAN_STMT, &s); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- rc = sqlite3_bind_int(s, 1, iLevel); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- rc = sqlite3_step(s); |
- if( rc==SQLITE_DONE ) return SQLITE_DONE; /* Should never happen */ |
- if( rc!=SQLITE_ROW ) return rc; |
- |
- /* This happens if all segments at this level are entirely inline. */ |
- if( SQLITE_NULL==sqlite3_column_type(s, 0) ){ |
- /* We expect only one row. We must execute another sqlite3_step() |
- * to complete the iteration; otherwise the table will remain locked. */ |
- int rc2 = sqlite3_step(s); |
- if( rc2==SQLITE_ROW ) return SQLITE_ERROR; |
- return rc2; |
- } |
- |
- *piStartBlockid = sqlite3_column_int64(s, 0); |
- *piEndBlockid = sqlite3_column_int64(s, 1); |
- |
- /* We expect only one row. We must execute another sqlite3_step() |
- * to complete the iteration; otherwise the table will remain locked. */ |
- rc = sqlite3_step(s); |
- if( rc==SQLITE_ROW ) return SQLITE_ERROR; |
- if( rc!=SQLITE_DONE ) return rc; |
- return SQLITE_ROW; |
-} |
- |
-/* Delete the segment blocks and segment directory records for all |
-** segments at iLevel. |
-*/ |
-static int segdir_delete(fulltext_vtab *v, int iLevel){ |
- sqlite3_stmt *s; |
- sqlite_int64 iStartBlockid, iEndBlockid; |
- int rc = segdir_span(v, iLevel, &iStartBlockid, &iEndBlockid); |
- if( rc!=SQLITE_ROW && rc!=SQLITE_DONE ) return rc; |
- |
- if( rc==SQLITE_ROW ){ |
- rc = block_delete(v, iStartBlockid, iEndBlockid); |
- if( rc!=SQLITE_OK ) return rc; |
- } |
- |
- /* Delete the segment directory itself. */ |
- rc = sql_get_statement(v, SEGDIR_DELETE_STMT, &s); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- rc = sqlite3_bind_int64(s, 1, iLevel); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- return sql_single_step(s); |
-} |
- |
-/* Delete entire fts index, SQLITE_OK on success, relevant error on |
-** failure. |
-*/ |
-static int segdir_delete_all(fulltext_vtab *v){ |
- sqlite3_stmt *s; |
- int rc = sql_get_statement(v, SEGDIR_DELETE_ALL_STMT, &s); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- rc = sql_single_step(s); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- rc = sql_get_statement(v, BLOCK_DELETE_ALL_STMT, &s); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- return sql_single_step(s); |
-} |
- |
-/* Returns SQLITE_OK with *pnSegments set to the number of entries in |
-** %_segdir and *piMaxLevel set to the highest level which has a |
-** segment. Otherwise returns the SQLite error which caused failure. |
-*/ |
-static int segdir_count(fulltext_vtab *v, int *pnSegments, int *piMaxLevel){ |
- sqlite3_stmt *s; |
- int rc = sql_get_statement(v, SEGDIR_COUNT_STMT, &s); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- rc = sqlite3_step(s); |
- /* TODO(shess): This case should not be possible? Should stronger |
- ** measures be taken if it happens? |
- */ |
- if( rc==SQLITE_DONE ){ |
- *pnSegments = 0; |
- *piMaxLevel = 0; |
- return SQLITE_OK; |
- } |
- if( rc!=SQLITE_ROW ) return rc; |
- |
- *pnSegments = sqlite3_column_int(s, 0); |
- *piMaxLevel = sqlite3_column_int(s, 1); |
- |
- /* We expect only one row. We must execute another sqlite3_step() |
- * to complete the iteration; otherwise the table will remain locked. */ |
- rc = sqlite3_step(s); |
- if( rc==SQLITE_DONE ) return SQLITE_OK; |
- if( rc==SQLITE_ROW ) return SQLITE_ERROR; |
- return rc; |
-} |
- |
-/* TODO(shess) clearPendingTerms() is far down the file because |
-** writeZeroSegment() is far down the file because LeafWriter is far |
-** down the file. Consider refactoring the code to move the non-vtab |
-** code above the vtab code so that we don't need this forward |
-** reference. |
-*/ |
-static int clearPendingTerms(fulltext_vtab *v); |
- |
-/* |
-** Free the memory used to contain a fulltext_vtab structure. |
-*/ |
-static void fulltext_vtab_destroy(fulltext_vtab *v){ |
- int iStmt, i; |
- |
- FTSTRACE(("FTS3 Destroy %p\n", v)); |
- for( iStmt=0; iStmt<MAX_STMT; iStmt++ ){ |
- if( v->pFulltextStatements[iStmt]!=NULL ){ |
- sqlite3_finalize(v->pFulltextStatements[iStmt]); |
- v->pFulltextStatements[iStmt] = NULL; |
- } |
- } |
- |
- for( i=0; i<MERGE_COUNT; i++ ){ |
- if( v->pLeafSelectStmts[i]!=NULL ){ |
- sqlite3_finalize(v->pLeafSelectStmts[i]); |
- v->pLeafSelectStmts[i] = NULL; |
- } |
- } |
- |
- if( v->pTokenizer!=NULL ){ |
- v->pTokenizer->pModule->xDestroy(v->pTokenizer); |
- v->pTokenizer = NULL; |
- } |
- |
- clearPendingTerms(v); |
- |
- sqlite3_free(v->azColumn); |
- for(i = 0; i < v->nColumn; ++i) { |
- sqlite3_free(v->azContentColumn[i]); |
- } |
- sqlite3_free(v->azContentColumn); |
- sqlite3_free(v); |
-} |
- |
-/* |
-** Token types for parsing the arguments to xConnect or xCreate. |
-*/ |
-#define TOKEN_EOF 0 /* End of file */ |
-#define TOKEN_SPACE 1 /* Any kind of whitespace */ |
-#define TOKEN_ID 2 /* An identifier */ |
-#define TOKEN_STRING 3 /* A string literal */ |
-#define TOKEN_PUNCT 4 /* A single punctuation character */ |
- |
-/* |
-** If X is a character that can be used in an identifier then |
-** ftsIdChar(X) will be true. Otherwise it is false. |
-** |
-** For ASCII, any character with the high-order bit set is |
-** allowed in an identifier. For 7-bit characters, |
-** isFtsIdChar[X] must be 1. |
-** |
-** Ticket #1066. the SQL standard does not allow '$' in the |
-** middle of identfiers. But many SQL implementations do. |
-** SQLite will allow '$' in identifiers for compatibility. |
-** But the feature is undocumented. |
-*/ |
-static const char isFtsIdChar[] = { |
-/* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE xF */ |
- 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2x */ |
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 3x */ |
- 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 4x */ |
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, /* 5x */ |
- 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6x */ |
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* 7x */ |
-}; |
-#define ftsIdChar(C) (((c=C)&0x80)!=0 || (c>0x1f && isFtsIdChar[c-0x20])) |
- |
- |
-/* |
-** Return the length of the token that begins at z[0]. |
-** Store the token type in *tokenType before returning. |
-*/ |
-static int ftsGetToken(const char *z, int *tokenType){ |
- int i, c; |
- switch( *z ){ |
- case 0: { |
- *tokenType = TOKEN_EOF; |
- return 0; |
- } |
- case ' ': case '\t': case '\n': case '\f': case '\r': { |
- for(i=1; safe_isspace(z[i]); i++){} |
- *tokenType = TOKEN_SPACE; |
- return i; |
- } |
- case '`': |
- case '\'': |
- case '"': { |
- int delim = z[0]; |
- for(i=1; (c=z[i])!=0; i++){ |
- if( c==delim ){ |
- if( z[i+1]==delim ){ |
- i++; |
- }else{ |
- break; |
- } |
- } |
- } |
- *tokenType = TOKEN_STRING; |
- return i + (c!=0); |
- } |
- case '[': { |
- for(i=1, c=z[0]; c!=']' && (c=z[i])!=0; i++){} |
- *tokenType = TOKEN_ID; |
- return i; |
- } |
- default: { |
- if( !ftsIdChar(*z) ){ |
- break; |
- } |
- for(i=1; ftsIdChar(z[i]); i++){} |
- *tokenType = TOKEN_ID; |
- return i; |
- } |
- } |
- *tokenType = TOKEN_PUNCT; |
- return 1; |
-} |
- |
-/* |
-** A token extracted from a string is an instance of the following |
-** structure. |
-*/ |
-typedef struct FtsToken { |
- const char *z; /* Pointer to token text. Not '\000' terminated */ |
- short int n; /* Length of the token text in bytes. */ |
-} FtsToken; |
- |
-/* |
-** Given a input string (which is really one of the argv[] parameters |
-** passed into xConnect or xCreate) split the string up into tokens. |
-** Return an array of pointers to '\000' terminated strings, one string |
-** for each non-whitespace token. |
-** |
-** The returned array is terminated by a single NULL pointer. |
-** |
-** Space to hold the returned array is obtained from a single |
-** malloc and should be freed by passing the return value to free(). |
-** The individual strings within the token list are all a part of |
-** the single memory allocation and will all be freed at once. |
-*/ |
-static char **tokenizeString(const char *z, int *pnToken){ |
- int nToken = 0; |
- FtsToken *aToken = sqlite3_malloc( strlen(z) * sizeof(aToken[0]) ); |
- int n = 1; |
- int e, i; |
- int totalSize = 0; |
- char **azToken; |
- char *zCopy; |
- while( n>0 ){ |
- n = ftsGetToken(z, &e); |
- if( e!=TOKEN_SPACE ){ |
- aToken[nToken].z = z; |
- aToken[nToken].n = n; |
- nToken++; |
- totalSize += n+1; |
- } |
- z += n; |
- } |
- azToken = (char**)sqlite3_malloc( nToken*sizeof(char*) + totalSize ); |
- zCopy = (char*)&azToken[nToken]; |
- nToken--; |
- for(i=0; i<nToken; i++){ |
- azToken[i] = zCopy; |
- n = aToken[i].n; |
- memcpy(zCopy, aToken[i].z, n); |
- zCopy[n] = 0; |
- zCopy += n+1; |
- } |
- azToken[nToken] = 0; |
- sqlite3_free(aToken); |
- *pnToken = nToken; |
- return azToken; |
-} |
- |
-/* |
-** Convert an SQL-style quoted string into a normal string by removing |
-** the quote characters. The conversion is done in-place. If the |
-** input does not begin with a quote character, then this routine |
-** is a no-op. |
-** |
-** Examples: |
-** |
-** "abc" becomes abc |
-** 'xyz' becomes xyz |
-** [pqr] becomes pqr |
-** `mno` becomes mno |
-*/ |
-static void dequoteString(char *z){ |
- int quote; |
- int i, j; |
- if( z==0 ) return; |
- quote = z[0]; |
- switch( quote ){ |
- case '\'': break; |
- case '"': break; |
- case '`': break; /* For MySQL compatibility */ |
- case '[': quote = ']'; break; /* For MS SqlServer compatibility */ |
- default: return; |
- } |
- for(i=1, j=0; z[i]; i++){ |
- if( z[i]==quote ){ |
- if( z[i+1]==quote ){ |
- z[j++] = quote; |
- i++; |
- }else{ |
- z[j++] = 0; |
- break; |
- } |
- }else{ |
- z[j++] = z[i]; |
- } |
- } |
-} |
- |
-/* |
-** The input azIn is a NULL-terminated list of tokens. Remove the first |
-** token and all punctuation tokens. Remove the quotes from |
-** around string literal tokens. |
-** |
-** Example: |
-** |
-** input: tokenize chinese ( 'simplifed' , 'mixed' ) |
-** output: chinese simplifed mixed |
-** |
-** Another example: |
-** |
-** input: delimiters ( '[' , ']' , '...' ) |
-** output: [ ] ... |
-*/ |
-static void tokenListToIdList(char **azIn){ |
- int i, j; |
- if( azIn ){ |
- for(i=0, j=-1; azIn[i]; i++){ |
- if( safe_isalnum(azIn[i][0]) || azIn[i][1] ){ |
- dequoteString(azIn[i]); |
- if( j>=0 ){ |
- azIn[j] = azIn[i]; |
- } |
- j++; |
- } |
- } |
- azIn[j] = 0; |
- } |
-} |
- |
- |
-/* |
-** Find the first alphanumeric token in the string zIn. Null-terminate |
-** this token. Remove any quotation marks. And return a pointer to |
-** the result. |
-*/ |
-static char *firstToken(char *zIn, char **pzTail){ |
- int n, ttype; |
- while(1){ |
- n = ftsGetToken(zIn, &ttype); |
- if( ttype==TOKEN_SPACE ){ |
- zIn += n; |
- }else if( ttype==TOKEN_EOF ){ |
- *pzTail = zIn; |
- return 0; |
- }else{ |
- zIn[n] = 0; |
- *pzTail = &zIn[1]; |
- dequoteString(zIn); |
- return zIn; |
- } |
- } |
- /*NOTREACHED*/ |
-} |
- |
-/* Return true if... |
-** |
-** * s begins with the string t, ignoring case |
-** * s is longer than t |
-** * The first character of s beyond t is not a alphanumeric |
-** |
-** Ignore leading space in *s. |
-** |
-** To put it another way, return true if the first token of |
-** s[] is t[]. |
-*/ |
-static int startsWith(const char *s, const char *t){ |
- while( safe_isspace(*s) ){ s++; } |
- while( *t ){ |
- if( safe_tolower(*s++)!=safe_tolower(*t++) ) return 0; |
- } |
- return *s!='_' && !safe_isalnum(*s); |
-} |
- |
-/* |
-** An instance of this structure defines the "spec" of a |
-** full text index. This structure is populated by parseSpec |
-** and use by fulltextConnect and fulltextCreate. |
-*/ |
-typedef struct TableSpec { |
- const char *zDb; /* Logical database name */ |
- const char *zName; /* Name of the full-text index */ |
- int nColumn; /* Number of columns to be indexed */ |
- char **azColumn; /* Original names of columns to be indexed */ |
- char **azContentColumn; /* Column names for %_content */ |
- char **azTokenizer; /* Name of tokenizer and its arguments */ |
-} TableSpec; |
- |
-/* |
-** Reclaim all of the memory used by a TableSpec |
-*/ |
-static void clearTableSpec(TableSpec *p) { |
- sqlite3_free(p->azColumn); |
- sqlite3_free(p->azContentColumn); |
- sqlite3_free(p->azTokenizer); |
-} |
- |
-/* Parse a CREATE VIRTUAL TABLE statement, which looks like this: |
- * |
- * CREATE VIRTUAL TABLE email |
- * USING fts3(subject, body, tokenize mytokenizer(myarg)) |
- * |
- * We return parsed information in a TableSpec structure. |
- * |
- */ |
-static int parseSpec(TableSpec *pSpec, int argc, const char *const*argv, |
- char**pzErr){ |
- int i, n; |
- char *z, *zDummy; |
- char **azArg; |
- const char *zTokenizer = 0; /* argv[] entry describing the tokenizer */ |
- |
- assert( argc>=3 ); |
- /* Current interface: |
- ** argv[0] - module name |
- ** argv[1] - database name |
- ** argv[2] - table name |
- ** argv[3..] - columns, optionally followed by tokenizer specification |
- ** and snippet delimiters specification. |
- */ |
- |
- /* Make a copy of the complete argv[][] array in a single allocation. |
- ** The argv[][] array is read-only and transient. We can write to the |
- ** copy in order to modify things and the copy is persistent. |
- */ |
- CLEAR(pSpec); |
- for(i=n=0; i<argc; i++){ |
- n += strlen(argv[i]) + 1; |
- } |
- azArg = sqlite3_malloc( sizeof(char*)*argc + n ); |
- if( azArg==0 ){ |
- return SQLITE_NOMEM; |
- } |
- z = (char*)&azArg[argc]; |
- for(i=0; i<argc; i++){ |
- azArg[i] = z; |
- strcpy(z, argv[i]); |
- z += strlen(z)+1; |
- } |
- |
- /* Identify the column names and the tokenizer and delimiter arguments |
- ** in the argv[][] array. |
- */ |
- pSpec->zDb = azArg[1]; |
- pSpec->zName = azArg[2]; |
- pSpec->nColumn = 0; |
- pSpec->azColumn = azArg; |
- zTokenizer = "tokenize simple"; |
- for(i=3; i<argc; ++i){ |
- if( startsWith(azArg[i],"tokenize") ){ |
- zTokenizer = azArg[i]; |
- }else{ |
- z = azArg[pSpec->nColumn] = firstToken(azArg[i], &zDummy); |
- pSpec->nColumn++; |
- } |
- } |
- if( pSpec->nColumn==0 ){ |
- azArg[0] = "content"; |
- pSpec->nColumn = 1; |
- } |
- |
- /* |
- ** Construct the list of content column names. |
- ** |
- ** Each content column name will be of the form cNNAAAA |
- ** where NN is the column number and AAAA is the sanitized |
- ** column name. "sanitized" means that special characters are |
- ** converted to "_". The cNN prefix guarantees that all column |
- ** names are unique. |
- ** |
- ** The AAAA suffix is not strictly necessary. It is included |
- ** for the convenience of people who might examine the generated |
- ** %_content table and wonder what the columns are used for. |
- */ |
- pSpec->azContentColumn = sqlite3_malloc( pSpec->nColumn * sizeof(char *) ); |
- if( pSpec->azContentColumn==0 ){ |
- clearTableSpec(pSpec); |
- return SQLITE_NOMEM; |
- } |
- for(i=0; i<pSpec->nColumn; i++){ |
- char *p; |
- pSpec->azContentColumn[i] = sqlite3_mprintf("c%d%s", i, azArg[i]); |
- for (p = pSpec->azContentColumn[i]; *p ; ++p) { |
- if( !safe_isalnum(*p) ) *p = '_'; |
- } |
- } |
- |
- /* |
- ** Parse the tokenizer specification string. |
- */ |
- pSpec->azTokenizer = tokenizeString(zTokenizer, &n); |
- tokenListToIdList(pSpec->azTokenizer); |
- |
- return SQLITE_OK; |
-} |
- |
-/* |
-** Generate a CREATE TABLE statement that describes the schema of |
-** the virtual table. Return a pointer to this schema string. |
-** |
-** Space is obtained from sqlite3_mprintf() and should be freed |
-** using sqlite3_free(). |
-*/ |
-static char *fulltextSchema( |
- int nColumn, /* Number of columns */ |
- const char *const* azColumn, /* List of columns */ |
- const char *zTableName /* Name of the table */ |
-){ |
- int i; |
- char *zSchema, *zNext; |
- const char *zSep = "("; |
- zSchema = sqlite3_mprintf("CREATE TABLE x"); |
- for(i=0; i<nColumn; i++){ |
- zNext = sqlite3_mprintf("%s%s%Q", zSchema, zSep, azColumn[i]); |
- sqlite3_free(zSchema); |
- zSchema = zNext; |
- zSep = ","; |
- } |
- zNext = sqlite3_mprintf("%s,%Q HIDDEN", zSchema, zTableName); |
- sqlite3_free(zSchema); |
- zSchema = zNext; |
- zNext = sqlite3_mprintf("%s,docid HIDDEN)", zSchema); |
- sqlite3_free(zSchema); |
- return zNext; |
-} |
- |
-/* |
-** Build a new sqlite3_vtab structure that will describe the |
-** fulltext index defined by spec. |
-*/ |
-static int constructVtab( |
- sqlite3 *db, /* The SQLite database connection */ |
- fts3Hash *pHash, /* Hash table containing tokenizers */ |
- TableSpec *spec, /* Parsed spec information from parseSpec() */ |
- sqlite3_vtab **ppVTab, /* Write the resulting vtab structure here */ |
- char **pzErr /* Write any error message here */ |
-){ |
- int rc; |
- int n; |
- fulltext_vtab *v = 0; |
- const sqlite3_tokenizer_module *m = NULL; |
- char *schema; |
- |
- char const *zTok; /* Name of tokenizer to use for this fts table */ |
- int nTok; /* Length of zTok, including nul terminator */ |
- |
- v = (fulltext_vtab *) sqlite3_malloc(sizeof(fulltext_vtab)); |
- if( v==0 ) return SQLITE_NOMEM; |
- CLEAR(v); |
- /* sqlite will initialize v->base */ |
- v->db = db; |
- v->zDb = spec->zDb; /* Freed when azColumn is freed */ |
- v->zName = spec->zName; /* Freed when azColumn is freed */ |
- v->nColumn = spec->nColumn; |
- v->azContentColumn = spec->azContentColumn; |
- spec->azContentColumn = 0; |
- v->azColumn = spec->azColumn; |
- spec->azColumn = 0; |
- |
- if( spec->azTokenizer==0 ){ |
- return SQLITE_NOMEM; |
- } |
- |
- zTok = spec->azTokenizer[0]; |
- if( !zTok ){ |
- zTok = "simple"; |
- } |
- nTok = strlen(zTok)+1; |
- |
- m = (sqlite3_tokenizer_module *)sqlite3Fts3HashFind(pHash, zTok, nTok); |
- if( !m ){ |
- *pzErr = sqlite3_mprintf("unknown tokenizer: %s", spec->azTokenizer[0]); |
- rc = SQLITE_ERROR; |
- goto err; |
- } |
- |
- for(n=0; spec->azTokenizer[n]; n++){} |
- if( n ){ |
- rc = m->xCreate(n-1, (const char*const*)&spec->azTokenizer[1], |
- &v->pTokenizer); |
- }else{ |
- rc = m->xCreate(0, 0, &v->pTokenizer); |
- } |
- if( rc!=SQLITE_OK ) goto err; |
- v->pTokenizer->pModule = m; |
- |
- /* TODO: verify the existence of backing tables foo_content, foo_term */ |
- |
- schema = fulltextSchema(v->nColumn, (const char*const*)v->azColumn, |
- spec->zName); |
- rc = sqlite3_declare_vtab(db, schema); |
- sqlite3_free(schema); |
- if( rc!=SQLITE_OK ) goto err; |
- |
- memset(v->pFulltextStatements, 0, sizeof(v->pFulltextStatements)); |
- |
- /* Indicate that the buffer is not live. */ |
- v->nPendingData = -1; |
- |
- *ppVTab = &v->base; |
- FTSTRACE(("FTS3 Connect %p\n", v)); |
- |
- return rc; |
- |
-err: |
- fulltext_vtab_destroy(v); |
- return rc; |
-} |
- |
-static int fulltextConnect( |
- sqlite3 *db, |
- void *pAux, |
- int argc, const char *const*argv, |
- sqlite3_vtab **ppVTab, |
- char **pzErr |
-){ |
- TableSpec spec; |
- int rc = parseSpec(&spec, argc, argv, pzErr); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- rc = constructVtab(db, (fts3Hash *)pAux, &spec, ppVTab, pzErr); |
- clearTableSpec(&spec); |
- return rc; |
-} |
- |
-/* The %_content table holds the text of each document, with |
-** the docid column exposed as the SQLite rowid for the table. |
-*/ |
-/* TODO(shess) This comment needs elaboration to match the updated |
-** code. Work it into the top-of-file comment at that time. |
-*/ |
-static int fulltextCreate(sqlite3 *db, void *pAux, |
- int argc, const char * const *argv, |
- sqlite3_vtab **ppVTab, char **pzErr){ |
- int rc; |
- TableSpec spec; |
- StringBuffer schema; |
- FTSTRACE(("FTS3 Create\n")); |
- |
- rc = parseSpec(&spec, argc, argv, pzErr); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- initStringBuffer(&schema); |
- append(&schema, "CREATE TABLE %_content("); |
- append(&schema, " docid INTEGER PRIMARY KEY,"); |
- appendList(&schema, spec.nColumn, spec.azContentColumn); |
- append(&schema, ")"); |
- rc = sql_exec(db, spec.zDb, spec.zName, stringBufferData(&schema)); |
- stringBufferDestroy(&schema); |
- if( rc!=SQLITE_OK ) goto out; |
- |
- rc = sql_exec(db, spec.zDb, spec.zName, |
- "create table %_segments(" |
- " blockid INTEGER PRIMARY KEY," |
- " block blob" |
- ");" |
- ); |
- if( rc!=SQLITE_OK ) goto out; |
- |
- rc = sql_exec(db, spec.zDb, spec.zName, |
- "create table %_segdir(" |
- " level integer," |
- " idx integer," |
- " start_block integer," |
- " leaves_end_block integer," |
- " end_block integer," |
- " root blob," |
- " primary key(level, idx)" |
- ");"); |
- if( rc!=SQLITE_OK ) goto out; |
- |
- rc = constructVtab(db, (fts3Hash *)pAux, &spec, ppVTab, pzErr); |
- |
-out: |
- clearTableSpec(&spec); |
- return rc; |
-} |
- |
-/* Decide how to handle an SQL query. */ |
-static int fulltextBestIndex(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ |
- fulltext_vtab *v = (fulltext_vtab *)pVTab; |
- int i; |
- FTSTRACE(("FTS3 BestIndex\n")); |
- |
- for(i=0; i<pInfo->nConstraint; ++i){ |
- const struct sqlite3_index_constraint *pConstraint; |
- pConstraint = &pInfo->aConstraint[i]; |
- if( pConstraint->usable ) { |
- if( (pConstraint->iColumn==-1 || pConstraint->iColumn==v->nColumn+1) && |
- pConstraint->op==SQLITE_INDEX_CONSTRAINT_EQ ){ |
- pInfo->idxNum = QUERY_DOCID; /* lookup by docid */ |
- FTSTRACE(("FTS3 QUERY_DOCID\n")); |
- } else if( pConstraint->iColumn>=0 && pConstraint->iColumn<=v->nColumn && |
- pConstraint->op==SQLITE_INDEX_CONSTRAINT_MATCH ){ |
- /* full-text search */ |
- pInfo->idxNum = QUERY_FULLTEXT + pConstraint->iColumn; |
- FTSTRACE(("FTS3 QUERY_FULLTEXT %d\n", pConstraint->iColumn)); |
- } else continue; |
- |
- pInfo->aConstraintUsage[i].argvIndex = 1; |
- pInfo->aConstraintUsage[i].omit = 1; |
- |
- /* An arbitrary value for now. |
- * TODO: Perhaps docid matches should be considered cheaper than |
- * full-text searches. */ |
- pInfo->estimatedCost = 1.0; |
- |
- return SQLITE_OK; |
- } |
- } |
- pInfo->idxNum = QUERY_GENERIC; |
- return SQLITE_OK; |
-} |
- |
-static int fulltextDisconnect(sqlite3_vtab *pVTab){ |
- FTSTRACE(("FTS3 Disconnect %p\n", pVTab)); |
- fulltext_vtab_destroy((fulltext_vtab *)pVTab); |
- return SQLITE_OK; |
-} |
- |
-static int fulltextDestroy(sqlite3_vtab *pVTab){ |
- fulltext_vtab *v = (fulltext_vtab *)pVTab; |
- int rc; |
- |
- FTSTRACE(("FTS3 Destroy %p\n", pVTab)); |
- rc = sql_exec(v->db, v->zDb, v->zName, |
- "drop table if exists %_content;" |
- "drop table if exists %_segments;" |
- "drop table if exists %_segdir;" |
- ); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- fulltext_vtab_destroy((fulltext_vtab *)pVTab); |
- return SQLITE_OK; |
-} |
- |
-static int fulltextOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){ |
- fulltext_cursor *c; |
- |
- c = (fulltext_cursor *) sqlite3_malloc(sizeof(fulltext_cursor)); |
- if( c ){ |
- memset(c, 0, sizeof(fulltext_cursor)); |
- /* sqlite will initialize c->base */ |
- *ppCursor = &c->base; |
- FTSTRACE(("FTS3 Open %p: %p\n", pVTab, c)); |
- return SQLITE_OK; |
- }else{ |
- return SQLITE_NOMEM; |
- } |
-} |
- |
-/* Free all of the dynamically allocated memory held by the |
-** Snippet |
-*/ |
-static void snippetClear(Snippet *p){ |
- sqlite3_free(p->aMatch); |
- sqlite3_free(p->zOffset); |
- sqlite3_free(p->zSnippet); |
- CLEAR(p); |
-} |
- |
-/* |
-** Append a single entry to the p->aMatch[] log. |
-*/ |
-static void snippetAppendMatch( |
- Snippet *p, /* Append the entry to this snippet */ |
- int iCol, int iTerm, /* The column and query term */ |
- int iToken, /* Matching token in document */ |
- int iStart, int nByte /* Offset and size of the match */ |
-){ |
- int i; |
- struct snippetMatch *pMatch; |
- if( p->nMatch+1>=p->nAlloc ){ |
- p->nAlloc = p->nAlloc*2 + 10; |
- p->aMatch = sqlite3_realloc(p->aMatch, p->nAlloc*sizeof(p->aMatch[0]) ); |
- if( p->aMatch==0 ){ |
- p->nMatch = 0; |
- p->nAlloc = 0; |
- return; |
- } |
- } |
- i = p->nMatch++; |
- pMatch = &p->aMatch[i]; |
- pMatch->iCol = iCol; |
- pMatch->iTerm = iTerm; |
- pMatch->iToken = iToken; |
- pMatch->iStart = iStart; |
- pMatch->nByte = nByte; |
-} |
- |
-/* |
-** Sizing information for the circular buffer used in snippetOffsetsOfColumn() |
-*/ |
-#define FTS3_ROTOR_SZ (32) |
-#define FTS3_ROTOR_MASK (FTS3_ROTOR_SZ-1) |
- |
-/* |
-** Function to iterate through the tokens of a compiled expression. |
-** |
-** Except, skip all tokens on the right-hand side of a NOT operator. |
-** This function is used to find tokens as part of snippet and offset |
-** generation and we do nt want snippets and offsets to report matches |
-** for tokens on the RHS of a NOT. |
-*/ |
-static int fts3NextExprToken(Fts3Expr **ppExpr, int *piToken){ |
- Fts3Expr *p = *ppExpr; |
- int iToken = *piToken; |
- if( iToken<0 ){ |
- /* In this case the expression p is the root of an expression tree. |
- ** Move to the first token in the expression tree. |
- */ |
- while( p->pLeft ){ |
- p = p->pLeft; |
- } |
- iToken = 0; |
- }else{ |
- assert(p && p->eType==FTSQUERY_PHRASE ); |
- if( iToken<(p->pPhrase->nToken-1) ){ |
- iToken++; |
- }else{ |
- iToken = 0; |
- while( p->pParent && p->pParent->pLeft!=p ){ |
- assert( p->pParent->pRight==p ); |
- p = p->pParent; |
- } |
- p = p->pParent; |
- if( p ){ |
- assert( p->pRight!=0 ); |
- p = p->pRight; |
- while( p->pLeft ){ |
- p = p->pLeft; |
- } |
- } |
- } |
- } |
- |
- *ppExpr = p; |
- *piToken = iToken; |
- return p?1:0; |
-} |
- |
-/* |
-** Return TRUE if the expression node pExpr is located beneath the |
-** RHS of a NOT operator. |
-*/ |
-static int fts3ExprBeneathNot(Fts3Expr *p){ |
- Fts3Expr *pParent; |
- while( p ){ |
- pParent = p->pParent; |
- if( pParent && pParent->eType==FTSQUERY_NOT && pParent->pRight==p ){ |
- return 1; |
- } |
- p = pParent; |
- } |
- return 0; |
-} |
- |
-/* |
-** Add entries to pSnippet->aMatch[] for every match that occurs against |
-** document zDoc[0..nDoc-1] which is stored in column iColumn. |
-*/ |
-static void snippetOffsetsOfColumn( |
- fulltext_cursor *pCur, /* The fulltest search cursor */ |
- Snippet *pSnippet, /* The Snippet object to be filled in */ |
- int iColumn, /* Index of fulltext table column */ |
- const char *zDoc, /* Text of the fulltext table column */ |
- int nDoc /* Length of zDoc in bytes */ |
-){ |
- const sqlite3_tokenizer_module *pTModule; /* The tokenizer module */ |
- sqlite3_tokenizer *pTokenizer; /* The specific tokenizer */ |
- sqlite3_tokenizer_cursor *pTCursor; /* Tokenizer cursor */ |
- fulltext_vtab *pVtab; /* The full text index */ |
- int nColumn; /* Number of columns in the index */ |
- int i, j; /* Loop counters */ |
- int rc; /* Return code */ |
- unsigned int match, prevMatch; /* Phrase search bitmasks */ |
- const char *zToken; /* Next token from the tokenizer */ |
- int nToken; /* Size of zToken */ |
- int iBegin, iEnd, iPos; /* Offsets of beginning and end */ |
- |
- /* The following variables keep a circular buffer of the last |
- ** few tokens */ |
- unsigned int iRotor = 0; /* Index of current token */ |
- int iRotorBegin[FTS3_ROTOR_SZ]; /* Beginning offset of token */ |
- int iRotorLen[FTS3_ROTOR_SZ]; /* Length of token */ |
- |
- pVtab = cursor_vtab(pCur); |
- nColumn = pVtab->nColumn; |
- pTokenizer = pVtab->pTokenizer; |
- pTModule = pTokenizer->pModule; |
- rc = pTModule->xOpen(pTokenizer, zDoc, nDoc, &pTCursor); |
- if( rc ) return; |
- pTCursor->pTokenizer = pTokenizer; |
- |
- prevMatch = 0; |
- while( !pTModule->xNext(pTCursor, &zToken, &nToken, &iBegin, &iEnd, &iPos) ){ |
- Fts3Expr *pIter = pCur->pExpr; |
- int iIter = -1; |
- iRotorBegin[iRotor&FTS3_ROTOR_MASK] = iBegin; |
- iRotorLen[iRotor&FTS3_ROTOR_MASK] = iEnd-iBegin; |
- match = 0; |
- for(i=0; i<(FTS3_ROTOR_SZ-1) && fts3NextExprToken(&pIter, &iIter); i++){ |
- int nPhrase; /* Number of tokens in current phrase */ |
- struct PhraseToken *pToken; /* Current token */ |
- int iCol; /* Column index */ |
- |
- if( fts3ExprBeneathNot(pIter) ) continue; |
- nPhrase = pIter->pPhrase->nToken; |
- pToken = &pIter->pPhrase->aToken[iIter]; |
- iCol = pIter->pPhrase->iColumn; |
- if( iCol>=0 && iCol<nColumn && iCol!=iColumn ) continue; |
- if( pToken->n>nToken ) continue; |
- if( !pToken->isPrefix && pToken->n<nToken ) continue; |
- assert( pToken->n<=nToken ); |
- if( memcmp(pToken->z, zToken, pToken->n) ) continue; |
- if( iIter>0 && (prevMatch & (1<<i))==0 ) continue; |
- match |= 1<<i; |
- if( i==(FTS3_ROTOR_SZ-2) || nPhrase==iIter+1 ){ |
- for(j=nPhrase-1; j>=0; j--){ |
- int k = (iRotor-j) & FTS3_ROTOR_MASK; |
- snippetAppendMatch(pSnippet, iColumn, i-j, iPos-j, |
- iRotorBegin[k], iRotorLen[k]); |
- } |
- } |
- } |
- prevMatch = match<<1; |
- iRotor++; |
- } |
- pTModule->xClose(pTCursor); |
-} |
- |
-/* |
-** Remove entries from the pSnippet structure to account for the NEAR |
-** operator. When this is called, pSnippet contains the list of token |
-** offsets produced by treating all NEAR operators as AND operators. |
-** This function removes any entries that should not be present after |
-** accounting for the NEAR restriction. For example, if the queried |
-** document is: |
-** |
-** "A B C D E A" |
-** |
-** and the query is: |
-** |
-** A NEAR/0 E |
-** |
-** then when this function is called the Snippet contains token offsets |
-** 0, 4 and 5. This function removes the "0" entry (because the first A |
-** is not near enough to an E). |
-** |
-** When this function is called, the value pointed to by parameter piLeft is |
-** the integer id of the left-most token in the expression tree headed by |
-** pExpr. This function increments *piLeft by the total number of tokens |
-** in the expression tree headed by pExpr. |
-** |
-** Return 1 if any trimming occurs. Return 0 if no trimming is required. |
-*/ |
-static int trimSnippetOffsets( |
- Fts3Expr *pExpr, /* The search expression */ |
- Snippet *pSnippet, /* The set of snippet offsets to be trimmed */ |
- int *piLeft /* Index of left-most token in pExpr */ |
-){ |
- if( pExpr ){ |
- if( trimSnippetOffsets(pExpr->pLeft, pSnippet, piLeft) ){ |
- return 1; |
- } |
- |
- switch( pExpr->eType ){ |
- case FTSQUERY_PHRASE: |
- *piLeft += pExpr->pPhrase->nToken; |
- break; |
- case FTSQUERY_NEAR: { |
- /* The right-hand-side of a NEAR operator is always a phrase. The |
- ** left-hand-side is either a phrase or an expression tree that is |
- ** itself headed by a NEAR operator. The following initializations |
- ** set local variable iLeft to the token number of the left-most |
- ** token in the right-hand phrase, and iRight to the right most |
- ** token in the same phrase. For example, if we had: |
- ** |
- ** <col> MATCH '"abc def" NEAR/2 "ghi jkl"' |
- ** |
- ** then iLeft will be set to 2 (token number of ghi) and nToken will |
- ** be set to 4. |
- */ |
- Fts3Expr *pLeft = pExpr->pLeft; |
- Fts3Expr *pRight = pExpr->pRight; |
- int iLeft = *piLeft; |
- int nNear = pExpr->nNear; |
- int nToken = pRight->pPhrase->nToken; |
- int jj, ii; |
- if( pLeft->eType==FTSQUERY_NEAR ){ |
- pLeft = pLeft->pRight; |
- } |
- assert( pRight->eType==FTSQUERY_PHRASE ); |
- assert( pLeft->eType==FTSQUERY_PHRASE ); |
- nToken += pLeft->pPhrase->nToken; |
- |
- for(ii=0; ii<pSnippet->nMatch; ii++){ |
- struct snippetMatch *p = &pSnippet->aMatch[ii]; |
- if( p->iTerm==iLeft ){ |
- int isOk = 0; |
- /* Snippet ii is an occurence of query term iLeft in the document. |
- ** It occurs at position (p->iToken) of the document. We now |
- ** search for an instance of token (iLeft-1) somewhere in the |
- ** range (p->iToken - nNear)...(p->iToken + nNear + nToken) within |
- ** the set of snippetMatch structures. If one is found, proceed. |
- ** If one cannot be found, then remove snippets ii..(ii+N-1) |
- ** from the matching snippets, where N is the number of tokens |
- ** in phrase pRight->pPhrase. |
- */ |
- for(jj=0; isOk==0 && jj<pSnippet->nMatch; jj++){ |
- struct snippetMatch *p2 = &pSnippet->aMatch[jj]; |
- if( p2->iTerm==(iLeft-1) ){ |
- if( p2->iToken>=(p->iToken-nNear-1) |
- && p2->iToken<(p->iToken+nNear+nToken) |
- ){ |
- isOk = 1; |
- } |
- } |
- } |
- if( !isOk ){ |
- int kk; |
- for(kk=0; kk<pRight->pPhrase->nToken; kk++){ |
- pSnippet->aMatch[kk+ii].iTerm = -2; |
- } |
- return 1; |
- } |
- } |
- if( p->iTerm==(iLeft-1) ){ |
- int isOk = 0; |
- for(jj=0; isOk==0 && jj<pSnippet->nMatch; jj++){ |
- struct snippetMatch *p2 = &pSnippet->aMatch[jj]; |
- if( p2->iTerm==iLeft ){ |
- if( p2->iToken<=(p->iToken+nNear+1) |
- && p2->iToken>(p->iToken-nNear-nToken) |
- ){ |
- isOk = 1; |
- } |
- } |
- } |
- if( !isOk ){ |
- int kk; |
- for(kk=0; kk<pLeft->pPhrase->nToken; kk++){ |
- pSnippet->aMatch[ii-kk].iTerm = -2; |
- } |
- return 1; |
- } |
- } |
- } |
- break; |
- } |
- } |
- |
- if( trimSnippetOffsets(pExpr->pRight, pSnippet, piLeft) ){ |
- return 1; |
- } |
- } |
- return 0; |
-} |
- |
-/* |
-** Compute all offsets for the current row of the query. |
-** If the offsets have already been computed, this routine is a no-op. |
-*/ |
-static void snippetAllOffsets(fulltext_cursor *p){ |
- int nColumn; |
- int iColumn, i; |
- int iFirst, iLast; |
- int iTerm = 0; |
- fulltext_vtab *pFts = cursor_vtab(p); |
- |
- if( p->snippet.nMatch || p->pExpr==0 ){ |
- return; |
- } |
- nColumn = pFts->nColumn; |
- iColumn = (p->iCursorType - QUERY_FULLTEXT); |
- if( iColumn<0 || iColumn>=nColumn ){ |
- /* Look for matches over all columns of the full-text index */ |
- iFirst = 0; |
- iLast = nColumn-1; |
- }else{ |
- /* Look for matches in the iColumn-th column of the index only */ |
- iFirst = iColumn; |
- iLast = iColumn; |
- } |
- for(i=iFirst; i<=iLast; i++){ |
- const char *zDoc; |
- int nDoc; |
- zDoc = (const char*)sqlite3_column_text(p->pStmt, i+1); |
- nDoc = sqlite3_column_bytes(p->pStmt, i+1); |
- snippetOffsetsOfColumn(p, &p->snippet, i, zDoc, nDoc); |
- } |
- |
- while( trimSnippetOffsets(p->pExpr, &p->snippet, &iTerm) ){ |
- iTerm = 0; |
- } |
-} |
- |
-/* |
-** Convert the information in the aMatch[] array of the snippet |
-** into the string zOffset[0..nOffset-1]. This string is used as |
-** the return of the SQL offsets() function. |
-*/ |
-static void snippetOffsetText(Snippet *p){ |
- int i; |
- int cnt = 0; |
- StringBuffer sb; |
- char zBuf[200]; |
- if( p->zOffset ) return; |
- initStringBuffer(&sb); |
- for(i=0; i<p->nMatch; i++){ |
- struct snippetMatch *pMatch = &p->aMatch[i]; |
- if( pMatch->iTerm>=0 ){ |
- /* If snippetMatch.iTerm is less than 0, then the match was |
- ** discarded as part of processing the NEAR operator (see the |
- ** trimSnippetOffsetsForNear() function for details). Ignore |
- ** it in this case |
- */ |
- zBuf[0] = ' '; |
- sqlite3_snprintf(sizeof(zBuf)-1, &zBuf[cnt>0], "%d %d %d %d", |
- pMatch->iCol, pMatch->iTerm, pMatch->iStart, pMatch->nByte); |
- append(&sb, zBuf); |
- cnt++; |
- } |
- } |
- p->zOffset = stringBufferData(&sb); |
- p->nOffset = stringBufferLength(&sb); |
-} |
- |
-/* |
-** zDoc[0..nDoc-1] is phrase of text. aMatch[0..nMatch-1] are a set |
-** of matching words some of which might be in zDoc. zDoc is column |
-** number iCol. |
-** |
-** iBreak is suggested spot in zDoc where we could begin or end an |
-** excerpt. Return a value similar to iBreak but possibly adjusted |
-** to be a little left or right so that the break point is better. |
-*/ |
-static int wordBoundary( |
- int iBreak, /* The suggested break point */ |
- const char *zDoc, /* Document text */ |
- int nDoc, /* Number of bytes in zDoc[] */ |
- struct snippetMatch *aMatch, /* Matching words */ |
- int nMatch, /* Number of entries in aMatch[] */ |
- int iCol /* The column number for zDoc[] */ |
-){ |
- int i; |
- if( iBreak<=10 ){ |
- return 0; |
- } |
- if( iBreak>=nDoc-10 ){ |
- return nDoc; |
- } |
- for(i=0; i<nMatch && aMatch[i].iCol<iCol; i++){} |
- while( i<nMatch && aMatch[i].iStart+aMatch[i].nByte<iBreak ){ i++; } |
- if( i<nMatch ){ |
- if( aMatch[i].iStart<iBreak+10 ){ |
- return aMatch[i].iStart; |
- } |
- if( i>0 && aMatch[i-1].iStart+aMatch[i-1].nByte>=iBreak ){ |
- return aMatch[i-1].iStart; |
- } |
- } |
- for(i=1; i<=10; i++){ |
- if( safe_isspace(zDoc[iBreak-i]) ){ |
- return iBreak - i + 1; |
- } |
- if( safe_isspace(zDoc[iBreak+i]) ){ |
- return iBreak + i + 1; |
- } |
- } |
- return iBreak; |
-} |
- |
- |
- |
-/* |
-** Allowed values for Snippet.aMatch[].snStatus |
-*/ |
-#define SNIPPET_IGNORE 0 /* It is ok to omit this match from the snippet */ |
-#define SNIPPET_DESIRED 1 /* We want to include this match in the snippet */ |
- |
-/* |
-** Generate the text of a snippet. |
-*/ |
-static void snippetText( |
- fulltext_cursor *pCursor, /* The cursor we need the snippet for */ |
- const char *zStartMark, /* Markup to appear before each match */ |
- const char *zEndMark, /* Markup to appear after each match */ |
- const char *zEllipsis /* Ellipsis mark */ |
-){ |
- int i, j; |
- struct snippetMatch *aMatch; |
- int nMatch; |
- int nDesired; |
- StringBuffer sb; |
- int tailCol; |
- int tailOffset; |
- int iCol; |
- int nDoc; |
- const char *zDoc; |
- int iStart, iEnd; |
- int tailEllipsis = 0; |
- int iMatch; |
- |
- |
- sqlite3_free(pCursor->snippet.zSnippet); |
- pCursor->snippet.zSnippet = 0; |
- aMatch = pCursor->snippet.aMatch; |
- nMatch = pCursor->snippet.nMatch; |
- initStringBuffer(&sb); |
- |
- for(i=0; i<nMatch; i++){ |
- aMatch[i].snStatus = SNIPPET_IGNORE; |
- } |
- nDesired = 0; |
- for(i=0; i<FTS3_ROTOR_SZ; i++){ |
- for(j=0; j<nMatch; j++){ |
- if( aMatch[j].iTerm==i ){ |
- aMatch[j].snStatus = SNIPPET_DESIRED; |
- nDesired++; |
- break; |
- } |
- } |
- } |
- |
- iMatch = 0; |
- tailCol = -1; |
- tailOffset = 0; |
- for(i=0; i<nMatch && nDesired>0; i++){ |
- if( aMatch[i].snStatus!=SNIPPET_DESIRED ) continue; |
- nDesired--; |
- iCol = aMatch[i].iCol; |
- zDoc = (const char*)sqlite3_column_text(pCursor->pStmt, iCol+1); |
- nDoc = sqlite3_column_bytes(pCursor->pStmt, iCol+1); |
- iStart = aMatch[i].iStart - 40; |
- iStart = wordBoundary(iStart, zDoc, nDoc, aMatch, nMatch, iCol); |
- if( iStart<=10 ){ |
- iStart = 0; |
- } |
- if( iCol==tailCol && iStart<=tailOffset+20 ){ |
- iStart = tailOffset; |
- } |
- if( (iCol!=tailCol && tailCol>=0) || iStart!=tailOffset ){ |
- trimWhiteSpace(&sb); |
- appendWhiteSpace(&sb); |
- append(&sb, zEllipsis); |
- appendWhiteSpace(&sb); |
- } |
- iEnd = aMatch[i].iStart + aMatch[i].nByte + 40; |
- iEnd = wordBoundary(iEnd, zDoc, nDoc, aMatch, nMatch, iCol); |
- if( iEnd>=nDoc-10 ){ |
- iEnd = nDoc; |
- tailEllipsis = 0; |
- }else{ |
- tailEllipsis = 1; |
- } |
- while( iMatch<nMatch && aMatch[iMatch].iCol<iCol ){ iMatch++; } |
- while( iStart<iEnd ){ |
- while( iMatch<nMatch && aMatch[iMatch].iStart<iStart |
- && aMatch[iMatch].iCol<=iCol ){ |
- iMatch++; |
- } |
- if( iMatch<nMatch && aMatch[iMatch].iStart<iEnd |
- && aMatch[iMatch].iCol==iCol ){ |
- nappend(&sb, &zDoc[iStart], aMatch[iMatch].iStart - iStart); |
- iStart = aMatch[iMatch].iStart; |
- append(&sb, zStartMark); |
- nappend(&sb, &zDoc[iStart], aMatch[iMatch].nByte); |
- append(&sb, zEndMark); |
- iStart += aMatch[iMatch].nByte; |
- for(j=iMatch+1; j<nMatch; j++){ |
- if( aMatch[j].iTerm==aMatch[iMatch].iTerm |
- && aMatch[j].snStatus==SNIPPET_DESIRED ){ |
- nDesired--; |
- aMatch[j].snStatus = SNIPPET_IGNORE; |
- } |
- } |
- }else{ |
- nappend(&sb, &zDoc[iStart], iEnd - iStart); |
- iStart = iEnd; |
- } |
- } |
- tailCol = iCol; |
- tailOffset = iEnd; |
- } |
- trimWhiteSpace(&sb); |
- if( tailEllipsis ){ |
- appendWhiteSpace(&sb); |
- append(&sb, zEllipsis); |
- } |
- pCursor->snippet.zSnippet = stringBufferData(&sb); |
- pCursor->snippet.nSnippet = stringBufferLength(&sb); |
-} |
- |
- |
-/* |
-** Close the cursor. For additional information see the documentation |
-** on the xClose method of the virtual table interface. |
-*/ |
-static int fulltextClose(sqlite3_vtab_cursor *pCursor){ |
- fulltext_cursor *c = (fulltext_cursor *) pCursor; |
- FTSTRACE(("FTS3 Close %p\n", c)); |
- sqlite3_finalize(c->pStmt); |
- sqlite3Fts3ExprFree(c->pExpr); |
- snippetClear(&c->snippet); |
- if( c->result.nData!=0 ){ |
- dlrDestroy(&c->reader); |
- } |
- dataBufferDestroy(&c->result); |
- sqlite3_free(c); |
- return SQLITE_OK; |
-} |
- |
-static int fulltextNext(sqlite3_vtab_cursor *pCursor){ |
- fulltext_cursor *c = (fulltext_cursor *) pCursor; |
- int rc; |
- |
- FTSTRACE(("FTS3 Next %p\n", pCursor)); |
- snippetClear(&c->snippet); |
- if( c->iCursorType < QUERY_FULLTEXT ){ |
- /* TODO(shess) Handle SQLITE_SCHEMA AND SQLITE_BUSY. */ |
- rc = sqlite3_step(c->pStmt); |
- switch( rc ){ |
- case SQLITE_ROW: |
- c->eof = 0; |
- return SQLITE_OK; |
- case SQLITE_DONE: |
- c->eof = 1; |
- return SQLITE_OK; |
- default: |
- c->eof = 1; |
- return rc; |
- } |
- } else { /* full-text query */ |
- rc = sqlite3_reset(c->pStmt); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- if( c->result.nData==0 || dlrAtEnd(&c->reader) ){ |
- c->eof = 1; |
- return SQLITE_OK; |
- } |
- rc = sqlite3_bind_int64(c->pStmt, 1, dlrDocid(&c->reader)); |
- if( rc!=SQLITE_OK ) return rc; |
- rc = dlrStep(&c->reader); |
- if( rc!=SQLITE_OK ) return rc; |
- /* TODO(shess) Handle SQLITE_SCHEMA AND SQLITE_BUSY. */ |
- rc = sqlite3_step(c->pStmt); |
- if( rc==SQLITE_ROW ){ /* the case we expect */ |
- c->eof = 0; |
- return SQLITE_OK; |
- } |
- /* Corrupt if the index refers to missing document. */ |
- if( rc==SQLITE_DONE ) return SQLITE_CORRUPT_BKPT; |
- |
- return rc; |
- } |
-} |
- |
- |
-/* TODO(shess) If we pushed LeafReader to the top of the file, or to |
-** another file, term_select() could be pushed above |
-** docListOfTerm(). |
-*/ |
-static int termSelect(fulltext_vtab *v, int iColumn, |
- const char *pTerm, int nTerm, int isPrefix, |
- DocListType iType, DataBuffer *out); |
- |
-/* |
-** Return a DocList corresponding to the phrase *pPhrase. |
-** |
-** The resulting DL_DOCIDS doclist is stored in pResult, which is |
-** overwritten. |
-*/ |
-static int docListOfPhrase( |
- fulltext_vtab *pTab, /* The full text index */ |
- Fts3Phrase *pPhrase, /* Phrase to return a doclist corresponding to */ |
- DocListType eListType, /* Either DL_DOCIDS or DL_POSITIONS */ |
- DataBuffer *pResult /* Write the result here */ |
-){ |
- int ii; |
- int rc = SQLITE_OK; |
- int iCol = pPhrase->iColumn; |
- DocListType eType = eListType; |
- assert( eType==DL_POSITIONS || eType==DL_DOCIDS ); |
- if( pPhrase->nToken>1 ){ |
- eType = DL_POSITIONS; |
- } |
- |
- /* This code should never be called with buffered updates. */ |
- assert( pTab->nPendingData<0 ); |
- |
- for(ii=0; rc==SQLITE_OK && ii<pPhrase->nToken; ii++){ |
- DataBuffer tmp; |
- struct PhraseToken *p = &pPhrase->aToken[ii]; |
- rc = termSelect(pTab, iCol, p->z, p->n, p->isPrefix, eType, &tmp); |
- if( rc==SQLITE_OK ){ |
- if( ii==0 ){ |
- *pResult = tmp; |
- }else{ |
- DataBuffer res = *pResult; |
- dataBufferInit(pResult, 0); |
- if( ii==(pPhrase->nToken-1) ){ |
- eType = eListType; |
- } |
- rc = docListPhraseMerge( |
- res.pData, res.nData, tmp.pData, tmp.nData, 0, 0, eType, pResult |
- ); |
- dataBufferDestroy(&res); |
- dataBufferDestroy(&tmp); |
- if( rc!= SQLITE_OK ) return rc; |
- } |
- } |
- } |
- |
- return rc; |
-} |
- |
-/* |
-** Evaluate the full-text expression pExpr against fts3 table pTab. Write |
-** the results into pRes. |
-*/ |
-static int evalFts3Expr( |
- fulltext_vtab *pTab, /* Fts3 Virtual table object */ |
- Fts3Expr *pExpr, /* Parsed fts3 expression */ |
- DataBuffer *pRes /* OUT: Write results of the expression here */ |
-){ |
- int rc = SQLITE_OK; |
- |
- /* Initialize the output buffer. If this is an empty query (pExpr==0), |
- ** this is all that needs to be done. Empty queries produce empty |
- ** result sets. |
- */ |
- dataBufferInit(pRes, 0); |
- |
- if( pExpr ){ |
- if( pExpr->eType==FTSQUERY_PHRASE ){ |
- DocListType eType = DL_DOCIDS; |
- if( pExpr->pParent && pExpr->pParent->eType==FTSQUERY_NEAR ){ |
- eType = DL_POSITIONS; |
- } |
- rc = docListOfPhrase(pTab, pExpr->pPhrase, eType, pRes); |
- }else{ |
- DataBuffer lhs; |
- DataBuffer rhs; |
- |
- dataBufferInit(&rhs, 0); |
- if( SQLITE_OK==(rc = evalFts3Expr(pTab, pExpr->pLeft, &lhs)) |
- && SQLITE_OK==(rc = evalFts3Expr(pTab, pExpr->pRight, &rhs)) |
- ){ |
- switch( pExpr->eType ){ |
- case FTSQUERY_NEAR: { |
- int nToken; |
- Fts3Expr *pLeft; |
- DocListType eType = DL_DOCIDS; |
- if( pExpr->pParent && pExpr->pParent->eType==FTSQUERY_NEAR ){ |
- eType = DL_POSITIONS; |
- } |
- pLeft = pExpr->pLeft; |
- while( pLeft->eType==FTSQUERY_NEAR ){ |
- pLeft=pLeft->pRight; |
- } |
- assert( pExpr->pRight->eType==FTSQUERY_PHRASE ); |
- assert( pLeft->eType==FTSQUERY_PHRASE ); |
- nToken = pLeft->pPhrase->nToken + pExpr->pRight->pPhrase->nToken; |
- rc = docListPhraseMerge(lhs.pData, lhs.nData, rhs.pData, rhs.nData, |
- pExpr->nNear+1, nToken, eType, pRes |
- ); |
- break; |
- } |
- case FTSQUERY_NOT: { |
- rc = docListExceptMerge(lhs.pData, lhs.nData, rhs.pData, rhs.nData,pRes); |
- break; |
- } |
- case FTSQUERY_AND: { |
- rc = docListAndMerge(lhs.pData, lhs.nData, rhs.pData, rhs.nData, pRes); |
- break; |
- } |
- case FTSQUERY_OR: { |
- rc = docListOrMerge(lhs.pData, lhs.nData, rhs.pData, rhs.nData, pRes); |
- break; |
- } |
- } |
- } |
- dataBufferDestroy(&lhs); |
- dataBufferDestroy(&rhs); |
- } |
- } |
- |
- return rc; |
-} |
- |
-/* TODO(shess) Refactor the code to remove this forward decl. */ |
-static int flushPendingTerms(fulltext_vtab *v); |
- |
-/* Perform a full-text query using the search expression in |
-** zInput[0..nInput-1]. Return a list of matching documents |
-** in pResult. |
-** |
-** Queries must match column iColumn. Or if iColumn>=nColumn |
-** they are allowed to match against any column. |
-*/ |
-static int fulltextQuery( |
- fulltext_vtab *v, /* The full text index */ |
- int iColumn, /* Match against this column by default */ |
- const char *zInput, /* The query string */ |
- int nInput, /* Number of bytes in zInput[] */ |
- DataBuffer *pResult, /* Write the result doclist here */ |
- Fts3Expr **ppExpr /* Put parsed query string here */ |
-){ |
- int rc; |
- |
- /* TODO(shess) Instead of flushing pendingTerms, we could query for |
- ** the relevant term and merge the doclist into what we receive from |
- ** the database. Wait and see if this is a common issue, first. |
- ** |
- ** A good reason not to flush is to not generate update-related |
- ** error codes from here. |
- */ |
- |
- /* Flush any buffered updates before executing the query. */ |
- rc = flushPendingTerms(v); |
- if( rc!=SQLITE_OK ){ |
- return rc; |
- } |
- |
- /* Parse the query passed to the MATCH operator. */ |
- rc = sqlite3Fts3ExprParse(v->pTokenizer, |
- v->azColumn, v->nColumn, iColumn, zInput, nInput, ppExpr |
- ); |
- if( rc!=SQLITE_OK ){ |
- assert( 0==(*ppExpr) ); |
- return rc; |
- } |
- |
- return evalFts3Expr(v, *ppExpr, pResult); |
-} |
- |
-/* |
-** This is the xFilter interface for the virtual table. See |
-** the virtual table xFilter method documentation for additional |
-** information. |
-** |
-** If idxNum==QUERY_GENERIC then do a full table scan against |
-** the %_content table. |
-** |
-** If idxNum==QUERY_DOCID then do a docid lookup for a single entry |
-** in the %_content table. |
-** |
-** If idxNum>=QUERY_FULLTEXT then use the full text index. The |
-** column on the left-hand side of the MATCH operator is column |
-** number idxNum-QUERY_FULLTEXT, 0 indexed. argv[0] is the right-hand |
-** side of the MATCH operator. |
-*/ |
-/* TODO(shess) Upgrade the cursor initialization and destruction to |
-** account for fulltextFilter() being called multiple times on the |
-** same cursor. The current solution is very fragile. Apply fix to |
-** fts3 as appropriate. |
-*/ |
-static int fulltextFilter( |
- sqlite3_vtab_cursor *pCursor, /* The cursor used for this query */ |
- int idxNum, const char *idxStr, /* Which indexing scheme to use */ |
- int argc, sqlite3_value **argv /* Arguments for the indexing scheme */ |
-){ |
- fulltext_cursor *c = (fulltext_cursor *) pCursor; |
- fulltext_vtab *v = cursor_vtab(c); |
- int rc; |
- |
- FTSTRACE(("FTS3 Filter %p\n",pCursor)); |
- |
- /* If the cursor has a statement that was not prepared according to |
- ** idxNum, clear it. I believe all calls to fulltextFilter with a |
- ** given cursor will have the same idxNum , but in this case it's |
- ** easy to be safe. |
- */ |
- if( c->pStmt && c->iCursorType!=idxNum ){ |
- sqlite3_finalize(c->pStmt); |
- c->pStmt = NULL; |
- } |
- |
- /* Get a fresh statement appropriate to idxNum. */ |
- /* TODO(shess): Add a prepared-statement cache in the vt structure. |
- ** The cache must handle multiple open cursors. Easier to cache the |
- ** statement variants at the vt to reduce malloc/realloc/free here. |
- ** Or we could have a StringBuffer variant which allowed stack |
- ** construction for small values. |
- */ |
- if( !c->pStmt ){ |
- StringBuffer sb; |
- initStringBuffer(&sb); |
- append(&sb, "SELECT docid, "); |
- appendList(&sb, v->nColumn, v->azContentColumn); |
- append(&sb, " FROM %_content"); |
- if( idxNum!=QUERY_GENERIC ) append(&sb, " WHERE docid = ?"); |
- rc = sql_prepare(v->db, v->zDb, v->zName, &c->pStmt, |
- stringBufferData(&sb)); |
- stringBufferDestroy(&sb); |
- if( rc!=SQLITE_OK ) return rc; |
- c->iCursorType = idxNum; |
- }else{ |
- sqlite3_reset(c->pStmt); |
- assert( c->iCursorType==idxNum ); |
- } |
- |
- switch( idxNum ){ |
- case QUERY_GENERIC: |
- break; |
- |
- case QUERY_DOCID: |
- rc = sqlite3_bind_int64(c->pStmt, 1, sqlite3_value_int64(argv[0])); |
- if( rc!=SQLITE_OK ) return rc; |
- break; |
- |
- default: /* full-text search */ |
- { |
- int iCol = idxNum-QUERY_FULLTEXT; |
- const char *zQuery = (const char *)sqlite3_value_text(argv[0]); |
- assert( idxNum<=QUERY_FULLTEXT+v->nColumn); |
- assert( argc==1 ); |
- if( c->result.nData!=0 ){ |
- /* This case happens if the same cursor is used repeatedly. */ |
- dlrDestroy(&c->reader); |
- dataBufferReset(&c->result); |
- }else{ |
- dataBufferInit(&c->result, 0); |
- } |
- rc = fulltextQuery(v, iCol, zQuery, -1, &c->result, &c->pExpr); |
- if( rc!=SQLITE_OK ) return rc; |
- if( c->result.nData!=0 ){ |
- dlrInit(&c->reader, DL_DOCIDS, c->result.pData, c->result.nData); |
- } |
- break; |
- } |
- } |
- |
- return fulltextNext(pCursor); |
-} |
- |
-/* This is the xEof method of the virtual table. The SQLite core |
-** calls this routine to find out if it has reached the end of |
-** a query's results set. |
-*/ |
-static int fulltextEof(sqlite3_vtab_cursor *pCursor){ |
- fulltext_cursor *c = (fulltext_cursor *) pCursor; |
- return c->eof; |
-} |
- |
-/* This is the xColumn method of the virtual table. The SQLite |
-** core calls this method during a query when it needs the value |
-** of a column from the virtual table. This method needs to use |
-** one of the sqlite3_result_*() routines to store the requested |
-** value back in the pContext. |
-*/ |
-static int fulltextColumn(sqlite3_vtab_cursor *pCursor, |
- sqlite3_context *pContext, int idxCol){ |
- fulltext_cursor *c = (fulltext_cursor *) pCursor; |
- fulltext_vtab *v = cursor_vtab(c); |
- |
- if( idxCol<v->nColumn ){ |
- sqlite3_value *pVal = sqlite3_column_value(c->pStmt, idxCol+1); |
- sqlite3_result_value(pContext, pVal); |
- }else if( idxCol==v->nColumn ){ |
- /* The extra column whose name is the same as the table. |
- ** Return a blob which is a pointer to the cursor |
- */ |
- sqlite3_result_blob(pContext, &c, sizeof(c), SQLITE_TRANSIENT); |
- }else if( idxCol==v->nColumn+1 ){ |
- /* The docid column, which is an alias for rowid. */ |
- sqlite3_value *pVal = sqlite3_column_value(c->pStmt, 0); |
- sqlite3_result_value(pContext, pVal); |
- } |
- return SQLITE_OK; |
-} |
- |
-/* This is the xRowid method. The SQLite core calls this routine to |
-** retrieve the rowid for the current row of the result set. fts3 |
-** exposes %_content.docid as the rowid for the virtual table. The |
-** rowid should be written to *pRowid. |
-*/ |
-static int fulltextRowid(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){ |
- fulltext_cursor *c = (fulltext_cursor *) pCursor; |
- |
- *pRowid = sqlite3_column_int64(c->pStmt, 0); |
- return SQLITE_OK; |
-} |
- |
-/* Add all terms in [zText] to pendingTerms table. If [iColumn] > 0, |
-** we also store positions and offsets in the hash table using that |
-** column number. |
-*/ |
-static int buildTerms(fulltext_vtab *v, sqlite_int64 iDocid, |
- const char *zText, int iColumn){ |
- sqlite3_tokenizer *pTokenizer = v->pTokenizer; |
- sqlite3_tokenizer_cursor *pCursor; |
- const char *pToken; |
- int nTokenBytes; |
- int iStartOffset, iEndOffset, iPosition; |
- int rc; |
- |
- rc = pTokenizer->pModule->xOpen(pTokenizer, zText, -1, &pCursor); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- pCursor->pTokenizer = pTokenizer; |
- while( SQLITE_OK==(rc=pTokenizer->pModule->xNext(pCursor, |
- &pToken, &nTokenBytes, |
- &iStartOffset, &iEndOffset, |
- &iPosition)) ){ |
- DLCollector *p; |
- int nData; /* Size of doclist before our update. */ |
- |
- /* Positions can't be negative; we use -1 as a terminator |
- * internally. Token can't be NULL or empty. */ |
- if( iPosition<0 || pToken == NULL || nTokenBytes == 0 ){ |
- rc = SQLITE_ERROR; |
- break; |
- } |
- |
- p = fts3HashFind(&v->pendingTerms, pToken, nTokenBytes); |
- if( p==NULL ){ |
- nData = 0; |
- p = dlcNew(iDocid, DL_DEFAULT); |
- fts3HashInsert(&v->pendingTerms, pToken, nTokenBytes, p); |
- |
- /* Overhead for our hash table entry, the key, and the value. */ |
- v->nPendingData += sizeof(struct fts3HashElem)+sizeof(*p)+nTokenBytes; |
- }else{ |
- nData = p->b.nData; |
- if( p->dlw.iPrevDocid!=iDocid ) dlcNext(p, iDocid); |
- } |
- if( iColumn>=0 ){ |
- dlcAddPos(p, iColumn, iPosition, iStartOffset, iEndOffset); |
- } |
- |
- /* Accumulate data added by dlcNew or dlcNext, and dlcAddPos. */ |
- v->nPendingData += p->b.nData-nData; |
- } |
- |
- /* TODO(shess) Check return? Should this be able to cause errors at |
- ** this point? Actually, same question about sqlite3_finalize(), |
- ** though one could argue that failure there means that the data is |
- ** not durable. *ponder* |
- */ |
- pTokenizer->pModule->xClose(pCursor); |
- if( SQLITE_DONE == rc ) return SQLITE_OK; |
- return rc; |
-} |
- |
-/* Add doclists for all terms in [pValues] to pendingTerms table. */ |
-static int insertTerms(fulltext_vtab *v, sqlite_int64 iDocid, |
- sqlite3_value **pValues){ |
- int i; |
- for(i = 0; i < v->nColumn ; ++i){ |
- char *zText = (char*)sqlite3_value_text(pValues[i]); |
- int rc = buildTerms(v, iDocid, zText, i); |
- if( rc!=SQLITE_OK ) return rc; |
- } |
- return SQLITE_OK; |
-} |
- |
-/* Add empty doclists for all terms in the given row's content to |
-** pendingTerms. |
-*/ |
-static int deleteTerms(fulltext_vtab *v, sqlite_int64 iDocid){ |
- const char **pValues; |
- int i, rc; |
- |
- /* TODO(shess) Should we allow such tables at all? */ |
- if( DL_DEFAULT==DL_DOCIDS ) return SQLITE_ERROR; |
- |
- rc = content_select(v, iDocid, &pValues); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- for(i = 0 ; i < v->nColumn; ++i) { |
- rc = buildTerms(v, iDocid, pValues[i], -1); |
- if( rc!=SQLITE_OK ) break; |
- } |
- |
- freeStringArray(v->nColumn, pValues); |
- return SQLITE_OK; |
-} |
- |
-/* TODO(shess) Refactor the code to remove this forward decl. */ |
-static int initPendingTerms(fulltext_vtab *v, sqlite_int64 iDocid); |
- |
-/* Insert a row into the %_content table; set *piDocid to be the ID of the |
-** new row. Add doclists for terms to pendingTerms. |
-*/ |
-static int index_insert(fulltext_vtab *v, sqlite3_value *pRequestDocid, |
- sqlite3_value **pValues, sqlite_int64 *piDocid){ |
- int rc; |
- |
- rc = content_insert(v, pRequestDocid, pValues); /* execute an SQL INSERT */ |
- if( rc!=SQLITE_OK ) return rc; |
- |
- /* docid column is an alias for rowid. */ |
- *piDocid = sqlite3_last_insert_rowid(v->db); |
- rc = initPendingTerms(v, *piDocid); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- return insertTerms(v, *piDocid, pValues); |
-} |
- |
-/* Delete a row from the %_content table; add empty doclists for terms |
-** to pendingTerms. |
-*/ |
-static int index_delete(fulltext_vtab *v, sqlite_int64 iRow){ |
- int rc = initPendingTerms(v, iRow); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- rc = deleteTerms(v, iRow); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- return content_delete(v, iRow); /* execute an SQL DELETE */ |
-} |
- |
-/* Update a row in the %_content table; add delete doclists to |
-** pendingTerms for old terms not in the new data, add insert doclists |
-** to pendingTerms for terms in the new data. |
-*/ |
-static int index_update(fulltext_vtab *v, sqlite_int64 iRow, |
- sqlite3_value **pValues){ |
- int rc = initPendingTerms(v, iRow); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- /* Generate an empty doclist for each term that previously appeared in this |
- * row. */ |
- rc = deleteTerms(v, iRow); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- rc = content_update(v, pValues, iRow); /* execute an SQL UPDATE */ |
- if( rc!=SQLITE_OK ) return rc; |
- |
- /* Now add positions for terms which appear in the updated row. */ |
- return insertTerms(v, iRow, pValues); |
-} |
- |
-/*******************************************************************/ |
-/* InteriorWriter is used to collect terms and block references into |
-** interior nodes in %_segments. See commentary at top of file for |
-** format. |
-*/ |
- |
-/* How large interior nodes can grow. */ |
-#define INTERIOR_MAX 2048 |
- |
-/* Minimum number of terms per interior node (except the root). This |
-** prevents large terms from making the tree too skinny - must be >0 |
-** so that the tree always makes progress. Note that the min tree |
-** fanout will be INTERIOR_MIN_TERMS+1. |
-*/ |
-#define INTERIOR_MIN_TERMS 7 |
-#if INTERIOR_MIN_TERMS<1 |
-# error INTERIOR_MIN_TERMS must be greater than 0. |
-#endif |
- |
-/* ROOT_MAX controls how much data is stored inline in the segment |
-** directory. |
-*/ |
-/* TODO(shess) Push ROOT_MAX down to whoever is writing things. It's |
-** only here so that interiorWriterRootInfo() and leafWriterRootInfo() |
-** can both see it, but if the caller passed it in, we wouldn't even |
-** need a define. |
-*/ |
-#define ROOT_MAX 1024 |
-#if ROOT_MAX<VARINT_MAX*2 |
-# error ROOT_MAX must have enough space for a header. |
-#endif |
- |
-/* InteriorBlock stores a linked-list of interior blocks while a lower |
-** layer is being constructed. |
-*/ |
-typedef struct InteriorBlock { |
- DataBuffer term; /* Leftmost term in block's subtree. */ |
- DataBuffer data; /* Accumulated data for the block. */ |
- struct InteriorBlock *next; |
-} InteriorBlock; |
- |
-static InteriorBlock *interiorBlockNew(int iHeight, sqlite_int64 iChildBlock, |
- const char *pTerm, int nTerm){ |
- InteriorBlock *block = sqlite3_malloc(sizeof(InteriorBlock)); |
- char c[VARINT_MAX+VARINT_MAX]; |
- int n; |
- |
- if( block ){ |
- memset(block, 0, sizeof(*block)); |
- dataBufferInit(&block->term, 0); |
- dataBufferReplace(&block->term, pTerm, nTerm); |
- |
- n = fts3PutVarint(c, iHeight); |
- n += fts3PutVarint(c+n, iChildBlock); |
- dataBufferInit(&block->data, INTERIOR_MAX); |
- dataBufferReplace(&block->data, c, n); |
- } |
- return block; |
-} |
- |
-#ifndef NDEBUG |
-/* Verify that the data is readable as an interior node. */ |
-static void interiorBlockValidate(InteriorBlock *pBlock){ |
- const char *pData = pBlock->data.pData; |
- int nData = pBlock->data.nData; |
- int n, iDummy; |
- sqlite_int64 iBlockid; |
- |
- assert( nData>0 ); |
- assert( pData!=0 ); |
- assert( pData+nData>pData ); |
- |
- /* Must lead with height of node as a varint(n), n>0 */ |
- n = fts3GetVarint32(pData, &iDummy); |
- assert( n>0 ); |
- assert( iDummy>0 ); |
- assert( n<nData ); |
- pData += n; |
- nData -= n; |
- |
- /* Must contain iBlockid. */ |
- n = fts3GetVarint(pData, &iBlockid); |
- assert( n>0 ); |
- assert( n<=nData ); |
- pData += n; |
- nData -= n; |
- |
- /* Zero or more terms of positive length */ |
- if( nData!=0 ){ |
- /* First term is not delta-encoded. */ |
- n = fts3GetVarint32(pData, &iDummy); |
- assert( n>0 ); |
- assert( iDummy>0 ); |
- assert( n+iDummy>0); |
- assert( n+iDummy<=nData ); |
- pData += n+iDummy; |
- nData -= n+iDummy; |
- |
- /* Following terms delta-encoded. */ |
- while( nData!=0 ){ |
- /* Length of shared prefix. */ |
- n = fts3GetVarint32(pData, &iDummy); |
- assert( n>0 ); |
- assert( iDummy>=0 ); |
- assert( n<nData ); |
- pData += n; |
- nData -= n; |
- |
- /* Length and data of distinct suffix. */ |
- n = fts3GetVarint32(pData, &iDummy); |
- assert( n>0 ); |
- assert( iDummy>0 ); |
- assert( n+iDummy>0); |
- assert( n+iDummy<=nData ); |
- pData += n+iDummy; |
- nData -= n+iDummy; |
- } |
- } |
-} |
-#define ASSERT_VALID_INTERIOR_BLOCK(x) interiorBlockValidate(x) |
-#else |
-#define ASSERT_VALID_INTERIOR_BLOCK(x) assert( 1 ) |
-#endif |
- |
-typedef struct InteriorWriter { |
- int iHeight; /* from 0 at leaves. */ |
- InteriorBlock *first, *last; |
- struct InteriorWriter *parentWriter; |
- |
- DataBuffer term; /* Last term written to block "last". */ |
- sqlite_int64 iOpeningChildBlock; /* First child block in block "last". */ |
-#ifndef NDEBUG |
- sqlite_int64 iLastChildBlock; /* for consistency checks. */ |
-#endif |
-} InteriorWriter; |
- |
-/* Initialize an interior node where pTerm[nTerm] marks the leftmost |
-** term in the tree. iChildBlock is the leftmost child block at the |
-** next level down the tree. |
-*/ |
-static void interiorWriterInit(int iHeight, const char *pTerm, int nTerm, |
- sqlite_int64 iChildBlock, |
- InteriorWriter *pWriter){ |
- InteriorBlock *block; |
- assert( iHeight>0 ); |
- CLEAR(pWriter); |
- |
- pWriter->iHeight = iHeight; |
- pWriter->iOpeningChildBlock = iChildBlock; |
-#ifndef NDEBUG |
- pWriter->iLastChildBlock = iChildBlock; |
-#endif |
- block = interiorBlockNew(iHeight, iChildBlock, pTerm, nTerm); |
- pWriter->last = pWriter->first = block; |
- ASSERT_VALID_INTERIOR_BLOCK(pWriter->last); |
- dataBufferInit(&pWriter->term, 0); |
-} |
- |
-/* Append the child node rooted at iChildBlock to the interior node, |
-** with pTerm[nTerm] as the leftmost term in iChildBlock's subtree. |
-*/ |
-static void interiorWriterAppend(InteriorWriter *pWriter, |
- const char *pTerm, int nTerm, |
- sqlite_int64 iChildBlock){ |
- char c[VARINT_MAX+VARINT_MAX]; |
- int n, nPrefix = 0; |
- |
- ASSERT_VALID_INTERIOR_BLOCK(pWriter->last); |
- |
- /* The first term written into an interior node is actually |
- ** associated with the second child added (the first child was added |
- ** in interiorWriterInit, or in the if clause at the bottom of this |
- ** function). That term gets encoded straight up, with nPrefix left |
- ** at 0. |
- */ |
- if( pWriter->term.nData==0 ){ |
- n = fts3PutVarint(c, nTerm); |
- }else{ |
- while( nPrefix<pWriter->term.nData && |
- pTerm[nPrefix]==pWriter->term.pData[nPrefix] ){ |
- nPrefix++; |
- } |
- |
- n = fts3PutVarint(c, nPrefix); |
- n += fts3PutVarint(c+n, nTerm-nPrefix); |
- } |
- |
-#ifndef NDEBUG |
- pWriter->iLastChildBlock++; |
-#endif |
- assert( pWriter->iLastChildBlock==iChildBlock ); |
- |
- /* Overflow to a new block if the new term makes the current block |
- ** too big, and the current block already has enough terms. |
- */ |
- if( pWriter->last->data.nData+n+nTerm-nPrefix>INTERIOR_MAX && |
- iChildBlock-pWriter->iOpeningChildBlock>INTERIOR_MIN_TERMS ){ |
- pWriter->last->next = interiorBlockNew(pWriter->iHeight, iChildBlock, |
- pTerm, nTerm); |
- pWriter->last = pWriter->last->next; |
- pWriter->iOpeningChildBlock = iChildBlock; |
- dataBufferReset(&pWriter->term); |
- }else{ |
- dataBufferAppend2(&pWriter->last->data, c, n, |
- pTerm+nPrefix, nTerm-nPrefix); |
- dataBufferReplace(&pWriter->term, pTerm, nTerm); |
- } |
- ASSERT_VALID_INTERIOR_BLOCK(pWriter->last); |
-} |
- |
-/* Free the space used by pWriter, including the linked-list of |
-** InteriorBlocks, and parentWriter, if present. |
-*/ |
-static int interiorWriterDestroy(InteriorWriter *pWriter){ |
- InteriorBlock *block = pWriter->first; |
- |
- while( block!=NULL ){ |
- InteriorBlock *b = block; |
- block = block->next; |
- dataBufferDestroy(&b->term); |
- dataBufferDestroy(&b->data); |
- sqlite3_free(b); |
- } |
- if( pWriter->parentWriter!=NULL ){ |
- interiorWriterDestroy(pWriter->parentWriter); |
- sqlite3_free(pWriter->parentWriter); |
- } |
- dataBufferDestroy(&pWriter->term); |
- SCRAMBLE(pWriter); |
- return SQLITE_OK; |
-} |
- |
-/* If pWriter can fit entirely in ROOT_MAX, return it as the root info |
-** directly, leaving *piEndBlockid unchanged. Otherwise, flush |
-** pWriter to %_segments, building a new layer of interior nodes, and |
-** recursively ask for their root into. |
-*/ |
-static int interiorWriterRootInfo(fulltext_vtab *v, InteriorWriter *pWriter, |
- char **ppRootInfo, int *pnRootInfo, |
- sqlite_int64 *piEndBlockid){ |
- InteriorBlock *block = pWriter->first; |
- sqlite_int64 iBlockid = 0; |
- int rc; |
- |
- /* If we can fit the segment inline */ |
- if( block==pWriter->last && block->data.nData<ROOT_MAX ){ |
- *ppRootInfo = block->data.pData; |
- *pnRootInfo = block->data.nData; |
- return SQLITE_OK; |
- } |
- |
- /* Flush the first block to %_segments, and create a new level of |
- ** interior node. |
- */ |
- ASSERT_VALID_INTERIOR_BLOCK(block); |
- rc = block_insert(v, block->data.pData, block->data.nData, &iBlockid); |
- if( rc!=SQLITE_OK ) return rc; |
- *piEndBlockid = iBlockid; |
- |
- pWriter->parentWriter = sqlite3_malloc(sizeof(*pWriter->parentWriter)); |
- interiorWriterInit(pWriter->iHeight+1, |
- block->term.pData, block->term.nData, |
- iBlockid, pWriter->parentWriter); |
- |
- /* Flush additional blocks and append to the higher interior |
- ** node. |
- */ |
- for(block=block->next; block!=NULL; block=block->next){ |
- ASSERT_VALID_INTERIOR_BLOCK(block); |
- rc = block_insert(v, block->data.pData, block->data.nData, &iBlockid); |
- if( rc!=SQLITE_OK ) return rc; |
- *piEndBlockid = iBlockid; |
- |
- interiorWriterAppend(pWriter->parentWriter, |
- block->term.pData, block->term.nData, iBlockid); |
- } |
- |
- /* Parent node gets the chance to be the root. */ |
- return interiorWriterRootInfo(v, pWriter->parentWriter, |
- ppRootInfo, pnRootInfo, piEndBlockid); |
-} |
- |
-/****************************************************************/ |
-/* InteriorReader is used to read off the data from an interior node |
-** (see comment at top of file for the format). |
-*/ |
-typedef struct InteriorReader { |
- const char *pData; |
- int nData; |
- |
- DataBuffer term; /* previous term, for decoding term delta. */ |
- |
- sqlite_int64 iBlockid; |
-} InteriorReader; |
- |
-static void interiorReaderDestroy(InteriorReader *pReader){ |
- dataBufferDestroy(&pReader->term); |
- SCRAMBLE(pReader); |
-} |
- |
-static int interiorReaderInit(const char *pData, int nData, |
- InteriorReader *pReader){ |
- int n, nTerm; |
- |
- /* These conditions are checked and met by the callers. */ |
- assert( nData>0 ); |
- assert( pData[0]!='\0' ); |
- |
- CLEAR(pReader); |
- |
- /* Decode the base blockid, and set the cursor to the first term. */ |
- n = fts3GetVarintSafe(pData+1, &pReader->iBlockid, nData-1); |
- if( !n ) return SQLITE_CORRUPT_BKPT; |
- pReader->pData = pData+1+n; |
- pReader->nData = nData-(1+n); |
- |
- /* A single-child interior node (such as when a leaf node was too |
- ** large for the segment directory) won't have any terms. |
- ** Otherwise, decode the first term. |
- */ |
- if( pReader->nData==0 ){ |
- dataBufferInit(&pReader->term, 0); |
- }else{ |
- n = fts3GetVarint32Safe(pReader->pData, &nTerm, pReader->nData); |
- if( !n || nTerm<0 || nTerm>pReader->nData-n) return SQLITE_CORRUPT_BKPT; |
- dataBufferInit(&pReader->term, nTerm); |
- dataBufferReplace(&pReader->term, pReader->pData+n, nTerm); |
- pReader->pData += n+nTerm; |
- pReader->nData -= n+nTerm; |
- } |
- return SQLITE_OK; |
-} |
- |
-static int interiorReaderAtEnd(InteriorReader *pReader){ |
- return pReader->term.nData<=0; |
-} |
- |
-static sqlite_int64 interiorReaderCurrentBlockid(InteriorReader *pReader){ |
- return pReader->iBlockid; |
-} |
- |
-static int interiorReaderTermBytes(InteriorReader *pReader){ |
- assert( !interiorReaderAtEnd(pReader) ); |
- return pReader->term.nData; |
-} |
-static const char *interiorReaderTerm(InteriorReader *pReader){ |
- assert( !interiorReaderAtEnd(pReader) ); |
- return pReader->term.pData; |
-} |
- |
-/* Step forward to the next term in the node. */ |
-static int interiorReaderStep(InteriorReader *pReader){ |
- assert( !interiorReaderAtEnd(pReader) ); |
- |
- /* If the last term has been read, signal eof, else construct the |
- ** next term. |
- */ |
- if( pReader->nData==0 ){ |
- dataBufferReset(&pReader->term); |
- }else{ |
- int n, nPrefix, nSuffix; |
- |
- n = fts3GetVarint32Safe(pReader->pData, &nPrefix, pReader->nData); |
- if( !n ) return SQLITE_CORRUPT_BKPT; |
- pReader->nData -= n; |
- pReader->pData += n; |
- n = fts3GetVarint32Safe(pReader->pData, &nSuffix, pReader->nData); |
- if( !n ) return SQLITE_CORRUPT_BKPT; |
- pReader->nData -= n; |
- pReader->pData += n; |
- if( nSuffix<0 || nSuffix>pReader->nData ) return SQLITE_CORRUPT_BKPT; |
- if( nPrefix<0 || nPrefix>pReader->term.nData ) return SQLITE_CORRUPT_BKPT; |
- |
- /* Truncate the current term and append suffix data. */ |
- pReader->term.nData = nPrefix; |
- dataBufferAppend(&pReader->term, pReader->pData, nSuffix); |
- |
- pReader->pData += nSuffix; |
- pReader->nData -= nSuffix; |
- } |
- pReader->iBlockid++; |
- return SQLITE_OK; |
-} |
- |
-/* Compare the current term to pTerm[nTerm], returning strcmp-style |
-** results. If isPrefix, equality means equal through nTerm bytes. |
-*/ |
-static int interiorReaderTermCmp(InteriorReader *pReader, |
- const char *pTerm, int nTerm, int isPrefix){ |
- const char *pReaderTerm = interiorReaderTerm(pReader); |
- int nReaderTerm = interiorReaderTermBytes(pReader); |
- int c, n = nReaderTerm<nTerm ? nReaderTerm : nTerm; |
- |
- if( n==0 ){ |
- if( nReaderTerm>0 ) return -1; |
- if( nTerm>0 ) return 1; |
- return 0; |
- } |
- |
- c = memcmp(pReaderTerm, pTerm, n); |
- if( c!=0 ) return c; |
- if( isPrefix && n==nTerm ) return 0; |
- return nReaderTerm - nTerm; |
-} |
- |
-/****************************************************************/ |
-/* LeafWriter is used to collect terms and associated doclist data |
-** into leaf blocks in %_segments (see top of file for format info). |
-** Expected usage is: |
-** |
-** LeafWriter writer; |
-** leafWriterInit(0, 0, &writer); |
-** while( sorted_terms_left_to_process ){ |
-** // data is doclist data for that term. |
-** rc = leafWriterStep(v, &writer, pTerm, nTerm, pData, nData); |
-** if( rc!=SQLITE_OK ) goto err; |
-** } |
-** rc = leafWriterFinalize(v, &writer); |
-**err: |
-** leafWriterDestroy(&writer); |
-** return rc; |
-** |
-** leafWriterStep() may write a collected leaf out to %_segments. |
-** leafWriterFinalize() finishes writing any buffered data and stores |
-** a root node in %_segdir. leafWriterDestroy() frees all buffers and |
-** InteriorWriters allocated as part of writing this segment. |
-** |
-** TODO(shess) Document leafWriterStepMerge(). |
-*/ |
- |
-/* Put terms with data this big in their own block. */ |
-#define STANDALONE_MIN 1024 |
- |
-/* Keep leaf blocks below this size. */ |
-#define LEAF_MAX 2048 |
- |
-typedef struct LeafWriter { |
- int iLevel; |
- int idx; |
- sqlite_int64 iStartBlockid; /* needed to create the root info */ |
- sqlite_int64 iEndBlockid; /* when we're done writing. */ |
- |
- DataBuffer term; /* previous encoded term */ |
- DataBuffer data; /* encoding buffer */ |
- |
- /* bytes of first term in the current node which distinguishes that |
- ** term from the last term of the previous node. |
- */ |
- int nTermDistinct; |
- |
- InteriorWriter parentWriter; /* if we overflow */ |
- int has_parent; |
-} LeafWriter; |
- |
-static void leafWriterInit(int iLevel, int idx, LeafWriter *pWriter){ |
- CLEAR(pWriter); |
- pWriter->iLevel = iLevel; |
- pWriter->idx = idx; |
- |
- dataBufferInit(&pWriter->term, 32); |
- |
- /* Start out with a reasonably sized block, though it can grow. */ |
- dataBufferInit(&pWriter->data, LEAF_MAX); |
-} |
- |
-#ifndef NDEBUG |
-/* Verify that the data is readable as a leaf node. */ |
-static void leafNodeValidate(const char *pData, int nData){ |
- int n, iDummy; |
- |
- if( nData==0 ) return; |
- assert( nData>0 ); |
- assert( pData!=0 ); |
- assert( pData+nData>pData ); |
- |
- /* Must lead with a varint(0) */ |
- n = fts3GetVarint32(pData, &iDummy); |
- assert( iDummy==0 ); |
- assert( n>0 ); |
- assert( n<nData ); |
- pData += n; |
- nData -= n; |
- |
- /* Leading term length and data must fit in buffer. */ |
- n = fts3GetVarint32(pData, &iDummy); |
- assert( n>0 ); |
- assert( iDummy>0 ); |
- assert( n+iDummy>0 ); |
- assert( n+iDummy<nData ); |
- pData += n+iDummy; |
- nData -= n+iDummy; |
- |
- /* Leading term's doclist length and data must fit. */ |
- n = fts3GetVarint32(pData, &iDummy); |
- assert( n>0 ); |
- assert( iDummy>0 ); |
- assert( n+iDummy>0 ); |
- assert( n+iDummy<=nData ); |
- ASSERT_VALID_DOCLIST(DL_DEFAULT, pData+n, iDummy, NULL); |
- pData += n+iDummy; |
- nData -= n+iDummy; |
- |
- /* Verify that trailing terms and doclists also are readable. */ |
- while( nData!=0 ){ |
- n = fts3GetVarint32(pData, &iDummy); |
- assert( n>0 ); |
- assert( iDummy>=0 ); |
- assert( n<nData ); |
- pData += n; |
- nData -= n; |
- n = fts3GetVarint32(pData, &iDummy); |
- assert( n>0 ); |
- assert( iDummy>0 ); |
- assert( n+iDummy>0 ); |
- assert( n+iDummy<nData ); |
- pData += n+iDummy; |
- nData -= n+iDummy; |
- |
- n = fts3GetVarint32(pData, &iDummy); |
- assert( n>0 ); |
- assert( iDummy>0 ); |
- assert( n+iDummy>0 ); |
- assert( n+iDummy<=nData ); |
- ASSERT_VALID_DOCLIST(DL_DEFAULT, pData+n, iDummy, NULL); |
- pData += n+iDummy; |
- nData -= n+iDummy; |
- } |
-} |
-#define ASSERT_VALID_LEAF_NODE(p, n) leafNodeValidate(p, n) |
-#else |
-#define ASSERT_VALID_LEAF_NODE(p, n) assert( 1 ) |
-#endif |
- |
-/* Flush the current leaf node to %_segments, and adding the resulting |
-** blockid and the starting term to the interior node which will |
-** contain it. |
-*/ |
-static int leafWriterInternalFlush(fulltext_vtab *v, LeafWriter *pWriter, |
- int iData, int nData){ |
- sqlite_int64 iBlockid = 0; |
- const char *pStartingTerm; |
- int nStartingTerm, rc, n; |
- |
- /* Must have the leading varint(0) flag, plus at least some |
- ** valid-looking data. |
- */ |
- assert( nData>2 ); |
- assert( iData>=0 ); |
- assert( iData+nData<=pWriter->data.nData ); |
- ASSERT_VALID_LEAF_NODE(pWriter->data.pData+iData, nData); |
- |
- rc = block_insert(v, pWriter->data.pData+iData, nData, &iBlockid); |
- if( rc!=SQLITE_OK ) return rc; |
- assert( iBlockid!=0 ); |
- |
- /* Reconstruct the first term in the leaf for purposes of building |
- ** the interior node. |
- */ |
- n = fts3GetVarint32(pWriter->data.pData+iData+1, &nStartingTerm); |
- pStartingTerm = pWriter->data.pData+iData+1+n; |
- assert( pWriter->data.nData>iData+1+n+nStartingTerm ); |
- assert( pWriter->nTermDistinct>0 ); |
- assert( pWriter->nTermDistinct<=nStartingTerm ); |
- nStartingTerm = pWriter->nTermDistinct; |
- |
- if( pWriter->has_parent ){ |
- interiorWriterAppend(&pWriter->parentWriter, |
- pStartingTerm, nStartingTerm, iBlockid); |
- }else{ |
- interiorWriterInit(1, pStartingTerm, nStartingTerm, iBlockid, |
- &pWriter->parentWriter); |
- pWriter->has_parent = 1; |
- } |
- |
- /* Track the span of this segment's leaf nodes. */ |
- if( pWriter->iEndBlockid==0 ){ |
- pWriter->iEndBlockid = pWriter->iStartBlockid = iBlockid; |
- }else{ |
- pWriter->iEndBlockid++; |
- assert( iBlockid==pWriter->iEndBlockid ); |
- } |
- |
- return SQLITE_OK; |
-} |
-static int leafWriterFlush(fulltext_vtab *v, LeafWriter *pWriter){ |
- int rc = leafWriterInternalFlush(v, pWriter, 0, pWriter->data.nData); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- /* Re-initialize the output buffer. */ |
- dataBufferReset(&pWriter->data); |
- |
- return SQLITE_OK; |
-} |
- |
-/* Fetch the root info for the segment. If the entire leaf fits |
-** within ROOT_MAX, then it will be returned directly, otherwise it |
-** will be flushed and the root info will be returned from the |
-** interior node. *piEndBlockid is set to the blockid of the last |
-** interior or leaf node written to disk (0 if none are written at |
-** all). |
-*/ |
-static int leafWriterRootInfo(fulltext_vtab *v, LeafWriter *pWriter, |
- char **ppRootInfo, int *pnRootInfo, |
- sqlite_int64 *piEndBlockid){ |
- /* we can fit the segment entirely inline */ |
- if( !pWriter->has_parent && pWriter->data.nData<ROOT_MAX ){ |
- *ppRootInfo = pWriter->data.pData; |
- *pnRootInfo = pWriter->data.nData; |
- *piEndBlockid = 0; |
- return SQLITE_OK; |
- } |
- |
- /* Flush remaining leaf data. */ |
- if( pWriter->data.nData>0 ){ |
- int rc = leafWriterFlush(v, pWriter); |
- if( rc!=SQLITE_OK ) return rc; |
- } |
- |
- /* We must have flushed a leaf at some point. */ |
- assert( pWriter->has_parent ); |
- |
- /* Tenatively set the end leaf blockid as the end blockid. If the |
- ** interior node can be returned inline, this will be the final |
- ** blockid, otherwise it will be overwritten by |
- ** interiorWriterRootInfo(). |
- */ |
- *piEndBlockid = pWriter->iEndBlockid; |
- |
- return interiorWriterRootInfo(v, &pWriter->parentWriter, |
- ppRootInfo, pnRootInfo, piEndBlockid); |
-} |
- |
-/* Collect the rootInfo data and store it into the segment directory. |
-** This has the effect of flushing the segment's leaf data to |
-** %_segments, and also flushing any interior nodes to %_segments. |
-*/ |
-static int leafWriterFinalize(fulltext_vtab *v, LeafWriter *pWriter){ |
- sqlite_int64 iEndBlockid; |
- char *pRootInfo; |
- int rc, nRootInfo; |
- |
- rc = leafWriterRootInfo(v, pWriter, &pRootInfo, &nRootInfo, &iEndBlockid); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- /* Don't bother storing an entirely empty segment. */ |
- if( iEndBlockid==0 && nRootInfo==0 ) return SQLITE_OK; |
- |
- return segdir_set(v, pWriter->iLevel, pWriter->idx, |
- pWriter->iStartBlockid, pWriter->iEndBlockid, |
- iEndBlockid, pRootInfo, nRootInfo); |
-} |
- |
-static void leafWriterDestroy(LeafWriter *pWriter){ |
- if( pWriter->has_parent ) interiorWriterDestroy(&pWriter->parentWriter); |
- dataBufferDestroy(&pWriter->term); |
- dataBufferDestroy(&pWriter->data); |
-} |
- |
-/* Encode a term into the leafWriter, delta-encoding as appropriate. |
-** Returns the length of the new term which distinguishes it from the |
-** previous term, which can be used to set nTermDistinct when a node |
-** boundary is crossed. |
-*/ |
-static int leafWriterEncodeTerm(LeafWriter *pWriter, |
- const char *pTerm, int nTerm){ |
- char c[VARINT_MAX+VARINT_MAX]; |
- int n, nPrefix = 0; |
- |
- assert( nTerm>0 ); |
- while( nPrefix<pWriter->term.nData && |
- pTerm[nPrefix]==pWriter->term.pData[nPrefix] ){ |
- nPrefix++; |
- /* Failing this implies that the terms weren't in order. */ |
- assert( nPrefix<nTerm ); |
- } |
- |
- if( pWriter->data.nData==0 ){ |
- /* Encode the node header and leading term as: |
- ** varint(0) |
- ** varint(nTerm) |
- ** char pTerm[nTerm] |
- */ |
- n = fts3PutVarint(c, '\0'); |
- n += fts3PutVarint(c+n, nTerm); |
- dataBufferAppend2(&pWriter->data, c, n, pTerm, nTerm); |
- }else{ |
- /* Delta-encode the term as: |
- ** varint(nPrefix) |
- ** varint(nSuffix) |
- ** char pTermSuffix[nSuffix] |
- */ |
- n = fts3PutVarint(c, nPrefix); |
- n += fts3PutVarint(c+n, nTerm-nPrefix); |
- dataBufferAppend2(&pWriter->data, c, n, pTerm+nPrefix, nTerm-nPrefix); |
- } |
- dataBufferReplace(&pWriter->term, pTerm, nTerm); |
- |
- return nPrefix+1; |
-} |
- |
-/* Used to avoid a memmove when a large amount of doclist data is in |
-** the buffer. This constructs a node and term header before |
-** iDoclistData and flushes the resulting complete node using |
-** leafWriterInternalFlush(). |
-*/ |
-static int leafWriterInlineFlush(fulltext_vtab *v, LeafWriter *pWriter, |
- const char *pTerm, int nTerm, |
- int iDoclistData){ |
- char c[VARINT_MAX+VARINT_MAX]; |
- int iData, n = fts3PutVarint(c, 0); |
- n += fts3PutVarint(c+n, nTerm); |
- |
- /* There should always be room for the header. Even if pTerm shared |
- ** a substantial prefix with the previous term, the entire prefix |
- ** could be constructed from earlier data in the doclist, so there |
- ** should be room. |
- */ |
- assert( iDoclistData>=n+nTerm ); |
- |
- iData = iDoclistData-(n+nTerm); |
- memcpy(pWriter->data.pData+iData, c, n); |
- memcpy(pWriter->data.pData+iData+n, pTerm, nTerm); |
- |
- return leafWriterInternalFlush(v, pWriter, iData, pWriter->data.nData-iData); |
-} |
- |
-/* Push pTerm[nTerm] along with the doclist data to the leaf layer of |
-** %_segments. |
-*/ |
-static int leafWriterStepMerge(fulltext_vtab *v, LeafWriter *pWriter, |
- const char *pTerm, int nTerm, |
- DLReader *pReaders, int nReaders){ |
- char c[VARINT_MAX+VARINT_MAX]; |
- int iTermData = pWriter->data.nData, iDoclistData; |
- int i, nData, n, nActualData, nActual, rc, nTermDistinct; |
- |
- ASSERT_VALID_LEAF_NODE(pWriter->data.pData, pWriter->data.nData); |
- nTermDistinct = leafWriterEncodeTerm(pWriter, pTerm, nTerm); |
- |
- /* Remember nTermDistinct if opening a new node. */ |
- if( iTermData==0 ) pWriter->nTermDistinct = nTermDistinct; |
- |
- iDoclistData = pWriter->data.nData; |
- |
- /* Estimate the length of the merged doclist so we can leave space |
- ** to encode it. |
- */ |
- for(i=0, nData=0; i<nReaders; i++){ |
- nData += dlrAllDataBytes(&pReaders[i]); |
- } |
- n = fts3PutVarint(c, nData); |
- dataBufferAppend(&pWriter->data, c, n); |
- |
- rc = docListMerge(&pWriter->data, pReaders, nReaders); |
- if( rc!=SQLITE_OK ) return rc; |
- ASSERT_VALID_DOCLIST(DL_DEFAULT, |
- pWriter->data.pData+iDoclistData+n, |
- pWriter->data.nData-iDoclistData-n, NULL); |
- |
- /* The actual amount of doclist data at this point could be smaller |
- ** than the length we encoded. Additionally, the space required to |
- ** encode this length could be smaller. For small doclists, this is |
- ** not a big deal, we can just use memmove() to adjust things. |
- */ |
- nActualData = pWriter->data.nData-(iDoclistData+n); |
- nActual = fts3PutVarint(c, nActualData); |
- assert( nActualData<=nData ); |
- assert( nActual<=n ); |
- |
- /* If the new doclist is big enough for force a standalone leaf |
- ** node, we can immediately flush it inline without doing the |
- ** memmove(). |
- */ |
- /* TODO(shess) This test matches leafWriterStep(), which does this |
- ** test before it knows the cost to varint-encode the term and |
- ** doclist lengths. At some point, change to |
- ** pWriter->data.nData-iTermData>STANDALONE_MIN. |
- */ |
- if( nTerm+nActualData>STANDALONE_MIN ){ |
- /* Push leaf node from before this term. */ |
- if( iTermData>0 ){ |
- rc = leafWriterInternalFlush(v, pWriter, 0, iTermData); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- pWriter->nTermDistinct = nTermDistinct; |
- } |
- |
- /* Fix the encoded doclist length. */ |
- iDoclistData += n - nActual; |
- memcpy(pWriter->data.pData+iDoclistData, c, nActual); |
- |
- /* Push the standalone leaf node. */ |
- rc = leafWriterInlineFlush(v, pWriter, pTerm, nTerm, iDoclistData); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- /* Leave the node empty. */ |
- dataBufferReset(&pWriter->data); |
- |
- return rc; |
- } |
- |
- /* At this point, we know that the doclist was small, so do the |
- ** memmove if indicated. |
- */ |
- if( nActual<n ){ |
- memmove(pWriter->data.pData+iDoclistData+nActual, |
- pWriter->data.pData+iDoclistData+n, |
- pWriter->data.nData-(iDoclistData+n)); |
- pWriter->data.nData -= n-nActual; |
- } |
- |
- /* Replace written length with actual length. */ |
- memcpy(pWriter->data.pData+iDoclistData, c, nActual); |
- |
- /* If the node is too large, break things up. */ |
- /* TODO(shess) This test matches leafWriterStep(), which does this |
- ** test before it knows the cost to varint-encode the term and |
- ** doclist lengths. At some point, change to |
- ** pWriter->data.nData>LEAF_MAX. |
- */ |
- if( iTermData+nTerm+nActualData>LEAF_MAX ){ |
- /* Flush out the leading data as a node */ |
- rc = leafWriterInternalFlush(v, pWriter, 0, iTermData); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- pWriter->nTermDistinct = nTermDistinct; |
- |
- /* Rebuild header using the current term */ |
- n = fts3PutVarint(pWriter->data.pData, 0); |
- n += fts3PutVarint(pWriter->data.pData+n, nTerm); |
- memcpy(pWriter->data.pData+n, pTerm, nTerm); |
- n += nTerm; |
- |
- /* There should always be room, because the previous encoding |
- ** included all data necessary to construct the term. |
- */ |
- assert( n<iDoclistData ); |
- /* So long as STANDALONE_MIN is half or less of LEAF_MAX, the |
- ** following memcpy() is safe (as opposed to needing a memmove). |
- */ |
- assert( 2*STANDALONE_MIN<=LEAF_MAX ); |
- assert( n+pWriter->data.nData-iDoclistData<iDoclistData ); |
- memcpy(pWriter->data.pData+n, |
- pWriter->data.pData+iDoclistData, |
- pWriter->data.nData-iDoclistData); |
- pWriter->data.nData -= iDoclistData-n; |
- } |
- ASSERT_VALID_LEAF_NODE(pWriter->data.pData, pWriter->data.nData); |
- |
- return SQLITE_OK; |
-} |
- |
-/* Push pTerm[nTerm] along with the doclist data to the leaf layer of |
-** %_segments. |
-*/ |
-/* TODO(shess) Revise writeZeroSegment() so that doclists are |
-** constructed directly in pWriter->data. |
-*/ |
-static int leafWriterStep(fulltext_vtab *v, LeafWriter *pWriter, |
- const char *pTerm, int nTerm, |
- const char *pData, int nData){ |
- int rc; |
- DLReader reader; |
- |
- rc = dlrInit(&reader, DL_DEFAULT, pData, nData); |
- if( rc!=SQLITE_OK ) return rc; |
- rc = leafWriterStepMerge(v, pWriter, pTerm, nTerm, &reader, 1); |
- dlrDestroy(&reader); |
- |
- return rc; |
-} |
- |
- |
-/****************************************************************/ |
-/* LeafReader is used to iterate over an individual leaf node. */ |
-typedef struct LeafReader { |
- DataBuffer term; /* copy of current term. */ |
- |
- const char *pData; /* data for current term. */ |
- int nData; |
-} LeafReader; |
- |
-static void leafReaderDestroy(LeafReader *pReader){ |
- dataBufferDestroy(&pReader->term); |
- SCRAMBLE(pReader); |
-} |
- |
-static int leafReaderAtEnd(LeafReader *pReader){ |
- return pReader->nData<=0; |
-} |
- |
-/* Access the current term. */ |
-static int leafReaderTermBytes(LeafReader *pReader){ |
- return pReader->term.nData; |
-} |
-static const char *leafReaderTerm(LeafReader *pReader){ |
- assert( pReader->term.nData>0 ); |
- return pReader->term.pData; |
-} |
- |
-/* Access the doclist data for the current term. */ |
-static int leafReaderDataBytes(LeafReader *pReader){ |
- int nData; |
- assert( pReader->term.nData>0 ); |
- fts3GetVarint32(pReader->pData, &nData); |
- return nData; |
-} |
-static const char *leafReaderData(LeafReader *pReader){ |
- int n, nData; |
- assert( pReader->term.nData>0 ); |
- n = fts3GetVarint32Safe(pReader->pData, &nData, pReader->nData); |
- if( !n || nData>pReader->nData-n ) return NULL; |
- return pReader->pData+n; |
-} |
- |
-static int leafReaderInit(const char *pData, int nData, |
- LeafReader *pReader){ |
- int nTerm, n; |
- |
- /* All callers check this precondition. */ |
- assert( nData>0 ); |
- assert( pData[0]=='\0' ); |
- |
- CLEAR(pReader); |
- |
- /* Read the first term, skipping the header byte. */ |
- n = fts3GetVarint32Safe(pData+1, &nTerm, nData-1); |
- if( !n || nTerm<0 || nTerm>nData-1-n ) return SQLITE_CORRUPT_BKPT; |
- dataBufferInit(&pReader->term, nTerm); |
- dataBufferReplace(&pReader->term, pData+1+n, nTerm); |
- |
- /* Position after the first term. */ |
- pReader->pData = pData+1+n+nTerm; |
- pReader->nData = nData-1-n-nTerm; |
- return SQLITE_OK; |
-} |
- |
-/* Step the reader forward to the next term. */ |
-static int leafReaderStep(LeafReader *pReader){ |
- int n, nData, nPrefix, nSuffix; |
- assert( !leafReaderAtEnd(pReader) ); |
- |
- /* Skip previous entry's data block. */ |
- n = fts3GetVarint32Safe(pReader->pData, &nData, pReader->nData); |
- if( !n || nData<0 || nData>pReader->nData-n ) return SQLITE_CORRUPT_BKPT; |
- pReader->pData += n+nData; |
- pReader->nData -= n+nData; |
- |
- if( !leafReaderAtEnd(pReader) ){ |
- /* Construct the new term using a prefix from the old term plus a |
- ** suffix from the leaf data. |
- */ |
- n = fts3GetVarint32Safe(pReader->pData, &nPrefix, pReader->nData); |
- if( !n ) return SQLITE_CORRUPT_BKPT; |
- pReader->nData -= n; |
- pReader->pData += n; |
- n = fts3GetVarint32Safe(pReader->pData, &nSuffix, pReader->nData); |
- if( !n ) return SQLITE_CORRUPT_BKPT; |
- pReader->nData -= n; |
- pReader->pData += n; |
- if( nSuffix<0 || nSuffix>pReader->nData ) return SQLITE_CORRUPT_BKPT; |
- if( nPrefix<0 || nPrefix>pReader->term.nData ) return SQLITE_CORRUPT_BKPT; |
- pReader->term.nData = nPrefix; |
- dataBufferAppend(&pReader->term, pReader->pData, nSuffix); |
- |
- pReader->pData += nSuffix; |
- pReader->nData -= nSuffix; |
- } |
- return SQLITE_OK; |
-} |
- |
-/* strcmp-style comparison of pReader's current term against pTerm. |
-** If isPrefix, equality means equal through nTerm bytes. |
-*/ |
-static int leafReaderTermCmp(LeafReader *pReader, |
- const char *pTerm, int nTerm, int isPrefix){ |
- int c, n = pReader->term.nData<nTerm ? pReader->term.nData : nTerm; |
- if( n==0 ){ |
- if( pReader->term.nData>0 ) return -1; |
- if(nTerm>0 ) return 1; |
- return 0; |
- } |
- |
- c = memcmp(pReader->term.pData, pTerm, n); |
- if( c!=0 ) return c; |
- if( isPrefix && n==nTerm ) return 0; |
- return pReader->term.nData - nTerm; |
-} |
- |
- |
-/****************************************************************/ |
-/* LeavesReader wraps LeafReader to allow iterating over the entire |
-** leaf layer of the tree. |
-*/ |
-typedef struct LeavesReader { |
- int idx; /* Index within the segment. */ |
- |
- sqlite3_stmt *pStmt; /* Statement we're streaming leaves from. */ |
- int eof; /* we've seen SQLITE_DONE from pStmt. */ |
- |
- LeafReader leafReader; /* reader for the current leaf. */ |
- DataBuffer rootData; /* root data for inline. */ |
-} LeavesReader; |
- |
-/* Access the current term. */ |
-static int leavesReaderTermBytes(LeavesReader *pReader){ |
- assert( !pReader->eof ); |
- return leafReaderTermBytes(&pReader->leafReader); |
-} |
-static const char *leavesReaderTerm(LeavesReader *pReader){ |
- assert( !pReader->eof ); |
- return leafReaderTerm(&pReader->leafReader); |
-} |
- |
-/* Access the doclist data for the current term. */ |
-static int leavesReaderDataBytes(LeavesReader *pReader){ |
- assert( !pReader->eof ); |
- return leafReaderDataBytes(&pReader->leafReader); |
-} |
-static const char *leavesReaderData(LeavesReader *pReader){ |
- assert( !pReader->eof ); |
- return leafReaderData(&pReader->leafReader); |
-} |
- |
-static int leavesReaderAtEnd(LeavesReader *pReader){ |
- return pReader->eof; |
-} |
- |
-/* loadSegmentLeaves() may not read all the way to SQLITE_DONE, thus |
-** leaving the statement handle open, which locks the table. |
-*/ |
-/* TODO(shess) This "solution" is not satisfactory. Really, there |
-** should be check-in function for all statement handles which |
-** arranges to call sqlite3_reset(). This most likely will require |
-** modification to control flow all over the place, though, so for now |
-** just punt. |
-** |
-** Note the the current system assumes that segment merges will run to |
-** completion, which is why this particular probably hasn't arisen in |
-** this case. Probably a brittle assumption. |
-*/ |
-static int leavesReaderReset(LeavesReader *pReader){ |
- return sqlite3_reset(pReader->pStmt); |
-} |
- |
-static void leavesReaderDestroy(LeavesReader *pReader){ |
- /* If idx is -1, that means we're using a non-cached statement |
- ** handle in the optimize() case, so we need to release it. |
- */ |
- if( pReader->pStmt!=NULL && pReader->idx==-1 ){ |
- sqlite3_finalize(pReader->pStmt); |
- } |
- leafReaderDestroy(&pReader->leafReader); |
- dataBufferDestroy(&pReader->rootData); |
- SCRAMBLE(pReader); |
-} |
- |
-/* Initialize pReader with the given root data (if iStartBlockid==0 |
-** the leaf data was entirely contained in the root), or from the |
-** stream of blocks between iStartBlockid and iEndBlockid, inclusive. |
-*/ |
-static int leavesReaderInit(fulltext_vtab *v, |
- int idx, |
- sqlite_int64 iStartBlockid, |
- sqlite_int64 iEndBlockid, |
- const char *pRootData, int nRootData, |
- LeavesReader *pReader){ |
- CLEAR(pReader); |
- pReader->idx = idx; |
- |
- dataBufferInit(&pReader->rootData, 0); |
- if( iStartBlockid==0 ){ |
- int rc; |
- /* Corrupt if this can't be a leaf node. */ |
- if( pRootData==NULL || nRootData<1 || pRootData[0]!='\0' ){ |
- return SQLITE_CORRUPT_BKPT; |
- } |
- /* Entire leaf level fit in root data. */ |
- dataBufferReplace(&pReader->rootData, pRootData, nRootData); |
- rc = leafReaderInit(pReader->rootData.pData, pReader->rootData.nData, |
- &pReader->leafReader); |
- if( rc!=SQLITE_OK ){ |
- dataBufferDestroy(&pReader->rootData); |
- return rc; |
- } |
- }else{ |
- sqlite3_stmt *s; |
- int rc = sql_get_leaf_statement(v, idx, &s); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- rc = sqlite3_bind_int64(s, 1, iStartBlockid); |
- if( rc!=SQLITE_OK ) goto err; |
- |
- rc = sqlite3_bind_int64(s, 2, iEndBlockid); |
- if( rc!=SQLITE_OK ) goto err; |
- |
- rc = sqlite3_step(s); |
- |
- /* Corrupt if interior node referenced missing leaf node. */ |
- if( rc==SQLITE_DONE ){ |
- rc = SQLITE_CORRUPT_BKPT; |
- goto err; |
- } |
- |
- if( rc!=SQLITE_ROW ) goto err; |
- rc = SQLITE_OK; |
- |
- /* Corrupt if leaf data isn't a blob. */ |
- if( sqlite3_column_type(s, 0)!=SQLITE_BLOB ){ |
- rc = SQLITE_CORRUPT_BKPT; |
- }else{ |
- const char *pLeafData = sqlite3_column_blob(s, 0); |
- int nLeafData = sqlite3_column_bytes(s, 0); |
- |
- /* Corrupt if this can't be a leaf node. */ |
- if( pLeafData==NULL || nLeafData<1 || pLeafData[0]!='\0' ){ |
- rc = SQLITE_CORRUPT_BKPT; |
- }else{ |
- rc = leafReaderInit(pLeafData, nLeafData, &pReader->leafReader); |
- } |
- } |
- |
- err: |
- if( rc!=SQLITE_OK ){ |
- if( idx==-1 ){ |
- sqlite3_finalize(s); |
- }else{ |
- sqlite3_reset(s); |
- } |
- return rc; |
- } |
- |
- pReader->pStmt = s; |
- } |
- return SQLITE_OK; |
-} |
- |
-/* Step the current leaf forward to the next term. If we reach the |
-** end of the current leaf, step forward to the next leaf block. |
-*/ |
-static int leavesReaderStep(fulltext_vtab *v, LeavesReader *pReader){ |
- int rc; |
- assert( !leavesReaderAtEnd(pReader) ); |
- rc = leafReaderStep(&pReader->leafReader); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- if( leafReaderAtEnd(&pReader->leafReader) ){ |
- if( pReader->rootData.pData ){ |
- pReader->eof = 1; |
- return SQLITE_OK; |
- } |
- rc = sqlite3_step(pReader->pStmt); |
- if( rc!=SQLITE_ROW ){ |
- pReader->eof = 1; |
- return rc==SQLITE_DONE ? SQLITE_OK : rc; |
- } |
- |
- /* Corrupt if leaf data isn't a blob. */ |
- if( sqlite3_column_type(pReader->pStmt, 0)!=SQLITE_BLOB ){ |
- return SQLITE_CORRUPT_BKPT; |
- }else{ |
- LeafReader tmp; |
- const char *pLeafData = sqlite3_column_blob(pReader->pStmt, 0); |
- int nLeafData = sqlite3_column_bytes(pReader->pStmt, 0); |
- |
- /* Corrupt if this can't be a leaf node. */ |
- if( pLeafData==NULL || nLeafData<1 || pLeafData[0]!='\0' ){ |
- return SQLITE_CORRUPT_BKPT; |
- } |
- |
- rc = leafReaderInit(pLeafData, nLeafData, &tmp); |
- if( rc!=SQLITE_OK ) return rc; |
- leafReaderDestroy(&pReader->leafReader); |
- pReader->leafReader = tmp; |
- } |
- } |
- return SQLITE_OK; |
-} |
- |
-/* Order LeavesReaders by their term, ignoring idx. Readers at eof |
-** always sort to the end. |
-*/ |
-static int leavesReaderTermCmp(LeavesReader *lr1, LeavesReader *lr2){ |
- if( leavesReaderAtEnd(lr1) ){ |
- if( leavesReaderAtEnd(lr2) ) return 0; |
- return 1; |
- } |
- if( leavesReaderAtEnd(lr2) ) return -1; |
- |
- return leafReaderTermCmp(&lr1->leafReader, |
- leavesReaderTerm(lr2), leavesReaderTermBytes(lr2), |
- 0); |
-} |
- |
-/* Similar to leavesReaderTermCmp(), with additional ordering by idx |
-** so that older segments sort before newer segments. |
-*/ |
-static int leavesReaderCmp(LeavesReader *lr1, LeavesReader *lr2){ |
- int c = leavesReaderTermCmp(lr1, lr2); |
- if( c!=0 ) return c; |
- return lr1->idx-lr2->idx; |
-} |
- |
-/* Assume that pLr[1]..pLr[nLr] are sorted. Bubble pLr[0] into its |
-** sorted position. |
-*/ |
-static void leavesReaderReorder(LeavesReader *pLr, int nLr){ |
- while( nLr>1 && leavesReaderCmp(pLr, pLr+1)>0 ){ |
- LeavesReader tmp = pLr[0]; |
- pLr[0] = pLr[1]; |
- pLr[1] = tmp; |
- nLr--; |
- pLr++; |
- } |
-} |
- |
-/* Initializes pReaders with the segments from level iLevel, returning |
-** the number of segments in *piReaders. Leaves pReaders in sorted |
-** order. |
-*/ |
-static int leavesReadersInit(fulltext_vtab *v, int iLevel, |
- LeavesReader *pReaders, int *piReaders){ |
- sqlite3_stmt *s; |
- int i, rc = sql_get_statement(v, SEGDIR_SELECT_LEVEL_STMT, &s); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- rc = sqlite3_bind_int(s, 1, iLevel); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- i = 0; |
- while( (rc = sqlite3_step(s))==SQLITE_ROW ){ |
- sqlite_int64 iStart = sqlite3_column_int64(s, 0); |
- sqlite_int64 iEnd = sqlite3_column_int64(s, 1); |
- const char *pRootData = sqlite3_column_blob(s, 2); |
- int nRootData = sqlite3_column_bytes(s, 2); |
- sqlite_int64 iIndex = sqlite3_column_int64(s, 3); |
- |
- /* Corrupt if we get back different types than we stored. */ |
- /* Also corrupt if the index is not sequential starting at 0. */ |
- if( sqlite3_column_type(s, 0)!=SQLITE_INTEGER || |
- sqlite3_column_type(s, 1)!=SQLITE_INTEGER || |
- sqlite3_column_type(s, 2)!=SQLITE_BLOB || |
- i!=iIndex || |
- i>=MERGE_COUNT ){ |
- rc = SQLITE_CORRUPT_BKPT; |
- break; |
- } |
- |
- rc = leavesReaderInit(v, i, iStart, iEnd, pRootData, nRootData, |
- &pReaders[i]); |
- if( rc!=SQLITE_OK ) break; |
- |
- i++; |
- } |
- if( rc!=SQLITE_DONE ){ |
- while( i-->0 ){ |
- leavesReaderDestroy(&pReaders[i]); |
- } |
- sqlite3_reset(s); /* So we don't leave a lock. */ |
- return rc; |
- } |
- |
- *piReaders = i; |
- |
- /* Leave our results sorted by term, then age. */ |
- while( i-- ){ |
- leavesReaderReorder(pReaders+i, *piReaders-i); |
- } |
- return SQLITE_OK; |
-} |
- |
-/* Merge doclists from pReaders[nReaders] into a single doclist, which |
-** is written to pWriter. Assumes pReaders is ordered oldest to |
-** newest. |
-*/ |
-/* TODO(shess) Consider putting this inline in segmentMerge(). */ |
-static int leavesReadersMerge(fulltext_vtab *v, |
- LeavesReader *pReaders, int nReaders, |
- LeafWriter *pWriter){ |
- DLReader dlReaders[MERGE_COUNT]; |
- const char *pTerm = leavesReaderTerm(pReaders); |
- int i, nTerm = leavesReaderTermBytes(pReaders); |
- int rc; |
- |
- assert( nReaders<=MERGE_COUNT ); |
- |
- for(i=0; i<nReaders; i++){ |
- const char *pData = leavesReaderData(pReaders+i); |
- if( pData==NULL ){ |
- rc = SQLITE_CORRUPT_BKPT; |
- break; |
- } |
- rc = dlrInit(&dlReaders[i], DL_DEFAULT, |
- pData, |
- leavesReaderDataBytes(pReaders+i)); |
- if( rc!=SQLITE_OK ) break; |
- } |
- if( rc!=SQLITE_OK ){ |
- while( i-->0 ){ |
- dlrDestroy(&dlReaders[i]); |
- } |
- return rc; |
- } |
- |
- return leafWriterStepMerge(v, pWriter, pTerm, nTerm, dlReaders, nReaders); |
-} |
- |
-/* Forward ref due to mutual recursion with segdirNextIndex(). */ |
-static int segmentMerge(fulltext_vtab *v, int iLevel); |
- |
-/* Put the next available index at iLevel into *pidx. If iLevel |
-** already has MERGE_COUNT segments, they are merged to a higher |
-** level to make room. |
-*/ |
-static int segdirNextIndex(fulltext_vtab *v, int iLevel, int *pidx){ |
- int rc = segdir_max_index(v, iLevel, pidx); |
- if( rc==SQLITE_DONE ){ /* No segments at iLevel. */ |
- *pidx = 0; |
- }else if( rc==SQLITE_ROW ){ |
- if( *pidx==(MERGE_COUNT-1) ){ |
- rc = segmentMerge(v, iLevel); |
- if( rc!=SQLITE_OK ) return rc; |
- *pidx = 0; |
- }else{ |
- (*pidx)++; |
+ return SQLITE_NOMEM; |
} |
}else{ |
- return rc; |
- } |
- return SQLITE_OK; |
-} |
- |
-/* Merge MERGE_COUNT segments at iLevel into a new segment at |
-** iLevel+1. If iLevel+1 is already full of segments, those will be |
-** merged to make room. |
-*/ |
-static int segmentMerge(fulltext_vtab *v, int iLevel){ |
- LeafWriter writer; |
- LeavesReader lrs[MERGE_COUNT]; |
- int i, rc, idx = 0; |
- |
- /* Determine the next available segment index at the next level, |
- ** merging as necessary. |
- */ |
- rc = segdirNextIndex(v, iLevel+1, &idx); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- /* TODO(shess) This assumes that we'll always see exactly |
- ** MERGE_COUNT segments to merge at a given level. That will be |
- ** broken if we allow the developer to request preemptive or |
- ** deferred merging. |
- */ |
- memset(&lrs, '\0', sizeof(lrs)); |
- rc = leavesReadersInit(v, iLevel, lrs, &i); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- leafWriterInit(iLevel+1, idx, &writer); |
- |
- if( i!=MERGE_COUNT ){ |
- rc = SQLITE_CORRUPT_BKPT; |
- goto err; |
- } |
- |
- /* Since leavesReaderReorder() pushes readers at eof to the end, |
- ** when the first reader is empty, all will be empty. |
- */ |
- while( !leavesReaderAtEnd(lrs) ){ |
- /* Figure out how many readers share their next term. */ |
- for(i=1; i<MERGE_COUNT && !leavesReaderAtEnd(lrs+i); i++){ |
- if( 0!=leavesReaderTermCmp(lrs, lrs+i) ) break; |
- } |
- |
- rc = leavesReadersMerge(v, lrs, i, &writer); |
- if( rc!=SQLITE_OK ) goto err; |
- |
- /* Step forward those that were merged. */ |
- while( i-->0 ){ |
- rc = leavesReaderStep(v, lrs+i); |
- if( rc!=SQLITE_OK ) goto err; |
- |
- /* Reorder by term, then by age. */ |
- leavesReaderReorder(lrs+i, MERGE_COUNT-i); |
- } |
- } |
- |
- for(i=0; i<MERGE_COUNT; i++){ |
- leavesReaderDestroy(&lrs[i]); |
- } |
- |
- rc = leafWriterFinalize(v, &writer); |
- leafWriterDestroy(&writer); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- /* Delete the merged segment data. */ |
- return segdir_delete(v, iLevel); |
- |
- err: |
- for(i=0; i<MERGE_COUNT; i++){ |
- leavesReaderDestroy(&lrs[i]); |
- } |
- leafWriterDestroy(&writer); |
- return rc; |
-} |
- |
-/* Accumulate the union of *acc and *pData into *acc. */ |
-static int docListAccumulateUnion(DataBuffer *acc, |
- const char *pData, int nData) { |
- DataBuffer tmp = *acc; |
- int rc; |
- dataBufferInit(acc, tmp.nData+nData); |
- rc = docListUnion(tmp.pData, tmp.nData, pData, nData, acc); |
- dataBufferDestroy(&tmp); |
- return rc; |
-} |
- |
-/* TODO(shess) It might be interesting to explore different merge |
-** strategies, here. For instance, since this is a sorted merge, we |
-** could easily merge many doclists in parallel. With some |
-** comprehension of the storage format, we could merge all of the |
-** doclists within a leaf node directly from the leaf node's storage. |
-** It may be worthwhile to merge smaller doclists before larger |
-** doclists, since they can be traversed more quickly - but the |
-** results may have less overlap, making them more expensive in a |
-** different way. |
-*/ |
- |
-/* Scan pReader for pTerm/nTerm, and merge the term's doclist over |
-** *out (any doclists with duplicate docids overwrite those in *out). |
-** Internal function for loadSegmentLeaf(). |
-*/ |
-static int loadSegmentLeavesInt(fulltext_vtab *v, LeavesReader *pReader, |
- const char *pTerm, int nTerm, int isPrefix, |
- DataBuffer *out){ |
- /* doclist data is accumulated into pBuffers similar to how one does |
- ** increment in binary arithmetic. If index 0 is empty, the data is |
- ** stored there. If there is data there, it is merged and the |
- ** results carried into position 1, with further merge-and-carry |
- ** until an empty position is found. |
- */ |
- DataBuffer *pBuffers = NULL; |
- int nBuffers = 0, nMaxBuffers = 0, rc; |
- |
- assert( nTerm>0 ); |
- |
- for(rc=SQLITE_OK; rc==SQLITE_OK && !leavesReaderAtEnd(pReader); |
- rc=leavesReaderStep(v, pReader)){ |
- /* TODO(shess) Really want leavesReaderTermCmp(), but that name is |
- ** already taken to compare the terms of two LeavesReaders. Think |
- ** on a better name. [Meanwhile, break encapsulation rather than |
- ** use a confusing name.] |
- */ |
- int c = leafReaderTermCmp(&pReader->leafReader, pTerm, nTerm, isPrefix); |
- if( c>0 ) break; /* Past any possible matches. */ |
- if( c==0 ){ |
- int iBuffer, nData; |
- const char *pData = leavesReaderData(pReader); |
- if( pData==NULL ){ |
- rc = SQLITE_CORRUPT_BKPT; |
+ int mergetype = (pTS->isReqPos ? MERGE_POS_OR : MERGE_OR); |
+ char *aMerge = aDoclist; |
+ int nMerge = nDoclist; |
+ int iOut; |
+ |
+ for(iOut=0; iOut<SizeofArray(pTS->aaOutput); iOut++){ |
+ char *aNew; |
+ int nNew; |
+ if( pTS->aaOutput[iOut]==0 ){ |
+ assert( iOut>0 ); |
+ pTS->aaOutput[iOut] = aMerge; |
+ pTS->anOutput[iOut] = nMerge; |
break; |
} |
- nData = leavesReaderDataBytes(pReader); |
- |
- /* Find the first empty buffer. */ |
- for(iBuffer=0; iBuffer<nBuffers; ++iBuffer){ |
- if( 0==pBuffers[iBuffer].nData ) break; |
- } |
- |
- /* Out of buffers, add an empty one. */ |
- if( iBuffer==nBuffers ){ |
- if( nBuffers==nMaxBuffers ){ |
- DataBuffer *p; |
- nMaxBuffers += 20; |
- |
- /* Manual realloc so we can handle NULL appropriately. */ |
- p = sqlite3_malloc(nMaxBuffers*sizeof(*pBuffers)); |
- if( p==NULL ){ |
- rc = SQLITE_NOMEM; |
- break; |
- } |
- |
- if( nBuffers>0 ){ |
- assert(pBuffers!=NULL); |
- memcpy(p, pBuffers, nBuffers*sizeof(*pBuffers)); |
- sqlite3_free(pBuffers); |
- } |
- pBuffers = p; |
- } |
- dataBufferInit(&(pBuffers[nBuffers]), 0); |
- nBuffers++; |
- } |
- |
- /* At this point, must have an empty at iBuffer. */ |
- assert(iBuffer<nBuffers && pBuffers[iBuffer].nData==0); |
- |
- /* If empty was first buffer, no need for merge logic. */ |
- if( iBuffer==0 ){ |
- dataBufferReplace(&(pBuffers[0]), pData, nData); |
- }else{ |
- /* pAcc is the empty buffer the merged data will end up in. */ |
- DataBuffer *pAcc = &(pBuffers[iBuffer]); |
- DataBuffer *p = &(pBuffers[0]); |
- |
- /* Handle position 0 specially to avoid need to prime pAcc |
- ** with pData/nData. |
- */ |
- dataBufferSwap(p, pAcc); |
- rc = docListAccumulateUnion(pAcc, pData, nData); |
- if( rc!=SQLITE_OK ) goto err; |
- |
- /* Accumulate remaining doclists into pAcc. */ |
- for(++p; p<pAcc; ++p){ |
- rc = docListAccumulateUnion(pAcc, p->pData, p->nData); |
- if( rc!=SQLITE_OK ) goto err; |
- |
- /* dataBufferReset() could allow a large doclist to blow up |
- ** our memory requirements. |
- */ |
- if( p->nCapacity<1024 ){ |
- dataBufferReset(p); |
- }else{ |
- dataBufferDestroy(p); |
- dataBufferInit(p, 0); |
- } |
- } |
- } |
- } |
- } |
- |
- /* Union all the doclists together into *out. */ |
- /* TODO(shess) What if *out is big? Sigh. */ |
- if( rc==SQLITE_OK && nBuffers>0 ){ |
- int iBuffer; |
- for(iBuffer=0; iBuffer<nBuffers; ++iBuffer){ |
- if( pBuffers[iBuffer].nData>0 ){ |
- if( out->nData==0 ){ |
- dataBufferSwap(out, &(pBuffers[iBuffer])); |
- }else{ |
- rc = docListAccumulateUnion(out, pBuffers[iBuffer].pData, |
- pBuffers[iBuffer].nData); |
- if( rc!=SQLITE_OK ) break; |
- } |
- } |
- } |
- } |
- |
-err: |
- while( nBuffers-- ){ |
- dataBufferDestroy(&(pBuffers[nBuffers])); |
- } |
- if( pBuffers!=NULL ) sqlite3_free(pBuffers); |
- |
- return rc; |
-} |
- |
-/* Call loadSegmentLeavesInt() with pData/nData as input. */ |
-static int loadSegmentLeaf(fulltext_vtab *v, const char *pData, int nData, |
- const char *pTerm, int nTerm, int isPrefix, |
- DataBuffer *out){ |
- LeavesReader reader; |
- int rc; |
- |
- assert( nData>1 ); |
- assert( *pData=='\0' ); |
- rc = leavesReaderInit(v, 0, 0, 0, pData, nData, &reader); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- rc = loadSegmentLeavesInt(v, &reader, pTerm, nTerm, isPrefix, out); |
- leavesReaderReset(&reader); |
- leavesReaderDestroy(&reader); |
- return rc; |
-} |
- |
-/* Call loadSegmentLeavesInt() with the leaf nodes from iStartLeaf to |
-** iEndLeaf (inclusive) as input, and merge the resulting doclist into |
-** out. |
-*/ |
-static int loadSegmentLeaves(fulltext_vtab *v, |
- sqlite_int64 iStartLeaf, sqlite_int64 iEndLeaf, |
- const char *pTerm, int nTerm, int isPrefix, |
- DataBuffer *out){ |
- int rc; |
- LeavesReader reader; |
- |
- assert( iStartLeaf<=iEndLeaf ); |
- rc = leavesReaderInit(v, 0, iStartLeaf, iEndLeaf, NULL, 0, &reader); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- rc = loadSegmentLeavesInt(v, &reader, pTerm, nTerm, isPrefix, out); |
- leavesReaderReset(&reader); |
- leavesReaderDestroy(&reader); |
- return rc; |
-} |
- |
-/* Taking pData/nData as an interior node, find the sequence of child |
-** nodes which could include pTerm/nTerm/isPrefix. Note that the |
-** interior node terms logically come between the blocks, so there is |
-** one more blockid than there are terms (that block contains terms >= |
-** the last interior-node term). |
-*/ |
-/* TODO(shess) The calling code may already know that the end child is |
-** not worth calculating, because the end may be in a later sibling |
-** node. Consider whether breaking symmetry is worthwhile. I suspect |
-** it is not worthwhile. |
-*/ |
-static int getChildrenContaining(const char *pData, int nData, |
- const char *pTerm, int nTerm, int isPrefix, |
- sqlite_int64 *piStartChild, |
- sqlite_int64 *piEndChild){ |
- InteriorReader reader; |
- int rc; |
- |
- assert( nData>1 ); |
- assert( *pData!='\0' ); |
- rc = interiorReaderInit(pData, nData, &reader); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- /* Scan for the first child which could contain pTerm/nTerm. */ |
- while( !interiorReaderAtEnd(&reader) ){ |
- if( interiorReaderTermCmp(&reader, pTerm, nTerm, 0)>0 ) break; |
- rc = interiorReaderStep(&reader); |
- if( rc!=SQLITE_OK ){ |
- interiorReaderDestroy(&reader); |
- return rc; |
- } |
- } |
- *piStartChild = interiorReaderCurrentBlockid(&reader); |
- /* Keep scanning to find a term greater than our term, using prefix |
- ** comparison if indicated. If isPrefix is false, this will be the |
- ** same blockid as the starting block. |
- */ |
- while( !interiorReaderAtEnd(&reader) ){ |
- if( interiorReaderTermCmp(&reader, pTerm, nTerm, isPrefix)>0 ) break; |
- rc = interiorReaderStep(&reader); |
- if( rc!=SQLITE_OK ){ |
- interiorReaderDestroy(&reader); |
- return rc; |
+ nNew = nMerge + pTS->anOutput[iOut]; |
+ aNew = sqlite3_malloc(nNew); |
+ if( !aNew ){ |
+ if( aMerge!=aDoclist ){ |
+ sqlite3_free(aMerge); |
+ } |
+ return SQLITE_NOMEM; |
+ } |
+ fts3DoclistMerge(mergetype, 0, 0, aNew, &nNew, |
+ pTS->aaOutput[iOut], pTS->anOutput[iOut], aMerge, nMerge, 0 |
+ ); |
+ |
+ if( iOut>0 ) sqlite3_free(aMerge); |
+ sqlite3_free(pTS->aaOutput[iOut]); |
+ pTS->aaOutput[iOut] = 0; |
+ |
+ aMerge = aNew; |
+ nMerge = nNew; |
+ if( (iOut+1)==SizeofArray(pTS->aaOutput) ){ |
+ pTS->aaOutput[iOut] = aMerge; |
+ pTS->anOutput[iOut] = nMerge; |
+ } |
} |
} |
- *piEndChild = interiorReaderCurrentBlockid(&reader); |
- |
- interiorReaderDestroy(&reader); |
- |
- /* Children must ascend, and if !prefix, both must be the same. */ |
- assert( *piEndChild>=*piStartChild ); |
- assert( isPrefix || *piStartChild==*piEndChild ); |
- return rc; |
+ return SQLITE_OK; |
} |
-/* Read block at iBlockid and pass it with other params to |
-** getChildrenContaining(). |
-*/ |
-static int loadAndGetChildrenContaining( |
- fulltext_vtab *v, |
- sqlite_int64 iBlockid, |
- const char *pTerm, int nTerm, int isPrefix, |
- sqlite_int64 *piStartChild, sqlite_int64 *piEndChild |
+static int fts3DeferredTermSelect( |
+ Fts3DeferredToken *pToken, /* Phrase token */ |
+ int isTermPos, /* True to include positions */ |
+ int *pnOut, /* OUT: Size of list */ |
+ char **ppOut /* OUT: Body of list */ |
){ |
- sqlite3_stmt *s = NULL; |
- int rc; |
- |
- assert( iBlockid!=0 ); |
- assert( pTerm!=NULL ); |
- assert( nTerm!=0 ); /* TODO(shess) Why not allow this? */ |
- assert( piStartChild!=NULL ); |
- assert( piEndChild!=NULL ); |
- |
- rc = sql_get_statement(v, BLOCK_SELECT_STMT, &s); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- rc = sqlite3_bind_int64(s, 1, iBlockid); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- rc = sqlite3_step(s); |
- /* Corrupt if interior node references missing child node. */ |
- if( rc==SQLITE_DONE ) return SQLITE_CORRUPT_BKPT; |
- if( rc!=SQLITE_ROW ) return rc; |
- |
- /* Corrupt if child node isn't a blob. */ |
- if( sqlite3_column_type(s, 0)!=SQLITE_BLOB ){ |
- sqlite3_reset(s); /* So we don't leave a lock. */ |
- return SQLITE_CORRUPT_BKPT; |
+ char *aSource; |
+ int nSource; |
+ |
+ aSource = sqlite3Fts3DeferredDoclist(pToken, &nSource); |
+ if( !aSource ){ |
+ *pnOut = 0; |
+ *ppOut = 0; |
+ }else if( isTermPos ){ |
+ *ppOut = sqlite3_malloc(nSource); |
+ if( !*ppOut ) return SQLITE_NOMEM; |
+ memcpy(*ppOut, aSource, nSource); |
+ *pnOut = nSource; |
}else{ |
- const char *pData = sqlite3_column_blob(s, 0); |
- int nData = sqlite3_column_bytes(s, 0); |
- |
- /* Corrupt if child is not a valid interior node. */ |
- if( pData==NULL || nData<1 || pData[0]=='\0' ){ |
- sqlite3_reset(s); /* So we don't leave a lock. */ |
- return SQLITE_CORRUPT_BKPT; |
- } |
- |
- rc = getChildrenContaining(pData, nData, pTerm, nTerm, |
- isPrefix, piStartChild, piEndChild); |
- if( rc!=SQLITE_OK ){ |
- sqlite3_reset(s); |
- return rc; |
- } |
+ sqlite3_int64 docid; |
+ *pnOut = sqlite3Fts3GetVarint(aSource, &docid); |
+ *ppOut = sqlite3_malloc(*pnOut); |
+ if( !*ppOut ) return SQLITE_NOMEM; |
+ sqlite3Fts3PutVarint(*ppOut, docid); |
} |
- /* We expect only one row. We must execute another sqlite3_step() |
- * to complete the iteration; otherwise the table will remain |
- * locked. */ |
- rc = sqlite3_step(s); |
- if( rc==SQLITE_ROW ) return SQLITE_ERROR; |
- if( rc!=SQLITE_DONE ) return rc; |
- |
return SQLITE_OK; |
} |
-/* Traverse the tree represented by pData[nData] looking for |
-** pTerm[nTerm], placing its doclist into *out. This is internal to |
-** loadSegment() to make error-handling cleaner. |
-*/ |
-static int loadSegmentInt(fulltext_vtab *v, const char *pData, int nData, |
- sqlite_int64 iLeavesEnd, |
- const char *pTerm, int nTerm, int isPrefix, |
- DataBuffer *out){ |
- /* Special case where root is a leaf. */ |
- if( *pData=='\0' ){ |
- return loadSegmentLeaf(v, pData, nData, pTerm, nTerm, isPrefix, out); |
- }else{ |
- int rc; |
- sqlite_int64 iStartChild, iEndChild; |
- |
- /* Process pData as an interior node, then loop down the tree |
- ** until we find the set of leaf nodes to scan for the term. |
- */ |
- rc = getChildrenContaining(pData, nData, pTerm, nTerm, isPrefix, |
- &iStartChild, &iEndChild); |
- if( rc!=SQLITE_OK ) return rc; |
- while( iStartChild>iLeavesEnd ){ |
- sqlite_int64 iNextStart, iNextEnd; |
- rc = loadAndGetChildrenContaining(v, iStartChild, pTerm, nTerm, isPrefix, |
- &iNextStart, &iNextEnd); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- /* If we've branched, follow the end branch, too. */ |
- if( iStartChild!=iEndChild ){ |
- sqlite_int64 iDummy; |
- rc = loadAndGetChildrenContaining(v, iEndChild, pTerm, nTerm, isPrefix, |
- &iDummy, &iNextEnd); |
- if( rc!=SQLITE_OK ) return rc; |
+int sqlite3Fts3SegReaderCursor( |
+ Fts3Table *p, /* FTS3 table handle */ |
+ int iLevel, /* Level of segments to scan */ |
+ const char *zTerm, /* Term to query for */ |
+ int nTerm, /* Size of zTerm in bytes */ |
+ int isPrefix, /* True for a prefix search */ |
+ int isScan, /* True to scan from zTerm to EOF */ |
+ Fts3SegReaderCursor *pCsr /* Cursor object to populate */ |
+){ |
+ int rc = SQLITE_OK; |
+ int rc2; |
+ int iAge = 0; |
+ sqlite3_stmt *pStmt = 0; |
+ Fts3SegReader *pPending = 0; |
+ |
+ assert( iLevel==FTS3_SEGCURSOR_ALL |
+ || iLevel==FTS3_SEGCURSOR_PENDING |
+ || iLevel>=0 |
+ ); |
+ assert( FTS3_SEGCURSOR_PENDING<0 ); |
+ assert( FTS3_SEGCURSOR_ALL<0 ); |
+ assert( iLevel==FTS3_SEGCURSOR_ALL || (zTerm==0 && isPrefix==1) ); |
+ assert( isPrefix==0 || isScan==0 ); |
+ |
+ |
+ memset(pCsr, 0, sizeof(Fts3SegReaderCursor)); |
+ |
+ /* If iLevel is less than 0, include a seg-reader for the pending-terms. */ |
+ assert( isScan==0 || fts3HashCount(&p->pendingTerms)==0 ); |
+ if( iLevel<0 && isScan==0 ){ |
+ rc = sqlite3Fts3SegReaderPending(p, zTerm, nTerm, isPrefix, &pPending); |
+ if( rc==SQLITE_OK && pPending ){ |
+ int nByte = (sizeof(Fts3SegReader *) * 16); |
+ pCsr->apSegment = (Fts3SegReader **)sqlite3_malloc(nByte); |
+ if( pCsr->apSegment==0 ){ |
+ rc = SQLITE_NOMEM; |
+ }else{ |
+ pCsr->apSegment[0] = pPending; |
+ pCsr->nSegment = 1; |
+ pPending = 0; |
} |
- |
- assert( iNextStart<=iNextEnd ); |
- iStartChild = iNextStart; |
- iEndChild = iNextEnd; |
} |
- assert( iStartChild<=iLeavesEnd ); |
- assert( iEndChild<=iLeavesEnd ); |
- |
- /* Scan through the leaf segments for doclists. */ |
- return loadSegmentLeaves(v, iStartChild, iEndChild, |
- pTerm, nTerm, isPrefix, out); |
} |
-} |
- |
-/* Call loadSegmentInt() to collect the doclist for pTerm/nTerm, then |
-** merge its doclist over *out (any duplicate doclists read from the |
-** segment rooted at pData will overwrite those in *out). |
-*/ |
-/* TODO(shess) Consider changing this to determine the depth of the |
-** leaves using either the first characters of interior nodes (when |
-** ==1, we're one level above the leaves), or the first character of |
-** the root (which will describe the height of the tree directly). |
-** Either feels somewhat tricky to me. |
-*/ |
-/* TODO(shess) The current merge is likely to be slow for large |
-** doclists (though it should process from newest/smallest to |
-** oldest/largest, so it may not be that bad). It might be useful to |
-** modify things to allow for N-way merging. This could either be |
-** within a segment, with pairwise merges across segments, or across |
-** all segments at once. |
-*/ |
-static int loadSegment(fulltext_vtab *v, const char *pData, int nData, |
- sqlite_int64 iLeavesEnd, |
- const char *pTerm, int nTerm, int isPrefix, |
- DataBuffer *out){ |
- DataBuffer result; |
- int rc; |
- |
- /* Corrupt if segment root can't be valid. */ |
- if( pData==NULL || nData<1 ) return SQLITE_CORRUPT_BKPT; |
- |
- /* This code should never be called with buffered updates. */ |
- assert( v->nPendingData<0 ); |
- |
- dataBufferInit(&result, 0); |
- rc = loadSegmentInt(v, pData, nData, iLeavesEnd, |
- pTerm, nTerm, isPrefix, &result); |
- if( rc==SQLITE_OK && result.nData>0 ){ |
- if( out->nData==0 ){ |
- DataBuffer tmp = *out; |
- *out = result; |
- result = tmp; |
- }else{ |
- DataBuffer merged; |
- DLReader readers[2]; |
- rc = dlrInit(&readers[0], DL_DEFAULT, out->pData, out->nData); |
- if( rc==SQLITE_OK ){ |
- rc = dlrInit(&readers[1], DL_DEFAULT, result.pData, result.nData); |
- if( rc==SQLITE_OK ){ |
- dataBufferInit(&merged, out->nData+result.nData); |
- rc = docListMerge(&merged, readers, 2); |
- dataBufferDestroy(out); |
- *out = merged; |
- dlrDestroy(&readers[1]); |
+ if( iLevel!=FTS3_SEGCURSOR_PENDING ){ |
+ if( rc==SQLITE_OK ){ |
+ rc = sqlite3Fts3AllSegdirs(p, iLevel, &pStmt); |
+ } |
+ while( rc==SQLITE_OK && SQLITE_ROW==(rc = sqlite3_step(pStmt)) ){ |
+ |
+ /* Read the values returned by the SELECT into local variables. */ |
+ sqlite3_int64 iStartBlock = sqlite3_column_int64(pStmt, 1); |
+ sqlite3_int64 iLeavesEndBlock = sqlite3_column_int64(pStmt, 2); |
+ sqlite3_int64 iEndBlock = sqlite3_column_int64(pStmt, 3); |
+ int nRoot = sqlite3_column_bytes(pStmt, 4); |
+ char const *zRoot = sqlite3_column_blob(pStmt, 4); |
+ |
+ /* If nSegment is a multiple of 16 the array needs to be extended. */ |
+ if( (pCsr->nSegment%16)==0 ){ |
+ Fts3SegReader **apNew; |
+ int nByte = (pCsr->nSegment + 16)*sizeof(Fts3SegReader*); |
+ apNew = (Fts3SegReader **)sqlite3_realloc(pCsr->apSegment, nByte); |
+ if( !apNew ){ |
+ rc = SQLITE_NOMEM; |
+ goto finished; |
} |
- dlrDestroy(&readers[0]); |
+ pCsr->apSegment = apNew; |
+ } |
+ |
+ /* If zTerm is not NULL, and this segment is not stored entirely on its |
+ ** root node, the range of leaves scanned can be reduced. Do this. */ |
+ if( iStartBlock && zTerm ){ |
+ sqlite3_int64 *pi = (isPrefix ? &iLeavesEndBlock : 0); |
+ rc = fts3SelectLeaf(p, zTerm, nTerm, zRoot, nRoot, &iStartBlock, pi); |
+ if( rc!=SQLITE_OK ) goto finished; |
+ if( isPrefix==0 && isScan==0 ) iLeavesEndBlock = iStartBlock; |
} |
+ |
+ rc = sqlite3Fts3SegReaderNew(iAge, iStartBlock, iLeavesEndBlock, |
+ iEndBlock, zRoot, nRoot, &pCsr->apSegment[pCsr->nSegment] |
+ ); |
+ if( rc!=SQLITE_OK ) goto finished; |
+ pCsr->nSegment++; |
+ iAge++; |
} |
} |
- dataBufferDestroy(&result); |
+ finished: |
+ rc2 = sqlite3_reset(pStmt); |
+ if( rc==SQLITE_DONE ) rc = rc2; |
+ sqlite3Fts3SegReaderFree(pPending); |
+ |
return rc; |
} |
-/* Scan the database and merge together the posting lists for the term |
-** into *out. |
-*/ |
-static int termSelect( |
- fulltext_vtab *v, |
- int iColumn, |
- const char *pTerm, int nTerm, /* Term to query for */ |
- int isPrefix, /* True for a prefix search */ |
- DocListType iType, |
- DataBuffer *out /* Write results here */ |
+ |
+static int fts3TermSegReaderCursor( |
+ Fts3Cursor *pCsr, /* Virtual table cursor handle */ |
+ const char *zTerm, /* Term to query for */ |
+ int nTerm, /* Size of zTerm in bytes */ |
+ int isPrefix, /* True for a prefix search */ |
+ Fts3SegReaderCursor **ppSegcsr /* OUT: Allocated seg-reader cursor */ |
){ |
- DataBuffer doclist; |
- sqlite3_stmt *s; |
- int rc = sql_get_statement(v, SEGDIR_SELECT_ALL_STMT, &s); |
- if( rc!=SQLITE_OK ) return rc; |
+ Fts3SegReaderCursor *pSegcsr; /* Object to allocate and return */ |
+ int rc = SQLITE_NOMEM; /* Return code */ |
+ |
+ pSegcsr = sqlite3_malloc(sizeof(Fts3SegReaderCursor)); |
+ if( pSegcsr ){ |
+ Fts3Table *p = (Fts3Table *)pCsr->base.pVtab; |
+ int i; |
+ int nCost = 0; |
+ rc = sqlite3Fts3SegReaderCursor( |
+ p, FTS3_SEGCURSOR_ALL, zTerm, nTerm, isPrefix, 0, pSegcsr); |
+ |
+ for(i=0; rc==SQLITE_OK && i<pSegcsr->nSegment; i++){ |
+ rc = sqlite3Fts3SegReaderCost(pCsr, pSegcsr->apSegment[i], &nCost); |
+ } |
+ pSegcsr->nCost = nCost; |
+ } |
- /* This code should never be called with buffered updates. */ |
- assert( v->nPendingData<0 ); |
+ *ppSegcsr = pSegcsr; |
+ return rc; |
+} |
- dataBufferInit(&doclist, 0); |
- dataBufferInit(out, 0); |
+static void fts3SegReaderCursorFree(Fts3SegReaderCursor *pSegcsr){ |
+ sqlite3Fts3SegReaderFinish(pSegcsr); |
+ sqlite3_free(pSegcsr); |
+} |
- /* Traverse the segments from oldest to newest so that newer doclist |
- ** elements for given docids overwrite older elements. |
- */ |
- while( (rc = sqlite3_step(s))==SQLITE_ROW ){ |
- const char *pData = sqlite3_column_blob(s, 2); |
- const int nData = sqlite3_column_bytes(s, 2); |
- const sqlite_int64 iLeavesEnd = sqlite3_column_int64(s, 1); |
- |
- /* Corrupt if we get back different types than we stored. */ |
- if( sqlite3_column_type(s, 1)!=SQLITE_INTEGER || |
- sqlite3_column_type(s, 2)!=SQLITE_BLOB ){ |
- rc = SQLITE_CORRUPT_BKPT; |
- goto err; |
- } |
+/* |
+** This function retrieves the doclist for the specified term (or term |
+** prefix) from the database. |
+** |
+** The returned doclist may be in one of two formats, depending on the |
+** value of parameter isReqPos. If isReqPos is zero, then the doclist is |
+** a sorted list of delta-compressed docids (a bare doclist). If isReqPos |
+** is non-zero, then the returned list is in the same format as is stored |
+** in the database without the found length specifier at the start of on-disk |
+** doclists. |
+*/ |
+static int fts3TermSelect( |
+ Fts3Table *p, /* Virtual table handle */ |
+ Fts3PhraseToken *pTok, /* Token to query for */ |
+ int iColumn, /* Column to query (or -ve for all columns) */ |
+ int isReqPos, /* True to include position lists in output */ |
+ int *pnOut, /* OUT: Size of buffer at *ppOut */ |
+ char **ppOut /* OUT: Malloced result buffer */ |
+){ |
+ int rc; /* Return code */ |
+ Fts3SegReaderCursor *pSegcsr; /* Seg-reader cursor for this term */ |
+ TermSelect tsc; /* Context object for fts3TermSelectCb() */ |
+ Fts3SegFilter filter; /* Segment term filter configuration */ |
+ |
+ pSegcsr = pTok->pSegcsr; |
+ memset(&tsc, 0, sizeof(TermSelect)); |
+ tsc.isReqPos = isReqPos; |
+ |
+ filter.flags = FTS3_SEGMENT_IGNORE_EMPTY |
+ | (pTok->isPrefix ? FTS3_SEGMENT_PREFIX : 0) |
+ | (isReqPos ? FTS3_SEGMENT_REQUIRE_POS : 0) |
+ | (iColumn<p->nColumn ? FTS3_SEGMENT_COLUMN_FILTER : 0); |
+ filter.iCol = iColumn; |
+ filter.zTerm = pTok->z; |
+ filter.nTerm = pTok->n; |
+ |
+ rc = sqlite3Fts3SegReaderStart(p, pSegcsr, &filter); |
+ while( SQLITE_OK==rc |
+ && SQLITE_ROW==(rc = sqlite3Fts3SegReaderStep(p, pSegcsr)) |
+ ){ |
+ rc = fts3TermSelectCb(p, (void *)&tsc, |
+ pSegcsr->zTerm, pSegcsr->nTerm, pSegcsr->aDoclist, pSegcsr->nDoclist |
+ ); |
+ } |
- rc = loadSegment(v, pData, nData, iLeavesEnd, pTerm, nTerm, isPrefix, |
- &doclist); |
- if( rc!=SQLITE_OK ) goto err; |
+ if( rc==SQLITE_OK ){ |
+ rc = fts3TermSelectMerge(&tsc); |
} |
- if( rc==SQLITE_DONE ){ |
- rc = SQLITE_OK; |
- if( doclist.nData!=0 ){ |
- /* TODO(shess) The old term_select_all() code applied the column |
- ** restrict as we merged segments, leading to smaller buffers. |
- ** This is probably worthwhile to bring back, once the new storage |
- ** system is checked in. |
- */ |
- if( iColumn==v->nColumn) iColumn = -1; |
- rc = docListTrim(DL_DEFAULT, doclist.pData, doclist.nData, |
- iColumn, iType, out); |
+ if( rc==SQLITE_OK ){ |
+ *ppOut = tsc.aaOutput[0]; |
+ *pnOut = tsc.anOutput[0]; |
+ }else{ |
+ int i; |
+ for(i=0; i<SizeofArray(tsc.aaOutput); i++){ |
+ sqlite3_free(tsc.aaOutput[i]); |
} |
} |
- err: |
- sqlite3_reset(s); /* So we don't leave a lock. */ |
- dataBufferDestroy(&doclist); |
+ fts3SegReaderCursorFree(pSegcsr); |
+ pTok->pSegcsr = 0; |
return rc; |
} |
-/****************************************************************/ |
-/* Used to hold hashtable data for sorting. */ |
-typedef struct TermData { |
- const char *pTerm; |
- int nTerm; |
- DLCollector *pCollector; |
-} TermData; |
+/* |
+** This function counts the total number of docids in the doclist stored |
+** in buffer aList[], size nList bytes. |
+** |
+** If the isPoslist argument is true, then it is assumed that the doclist |
+** contains a position-list following each docid. Otherwise, it is assumed |
+** that the doclist is simply a list of docids stored as delta encoded |
+** varints. |
+*/ |
+static int fts3DoclistCountDocids(int isPoslist, char *aList, int nList){ |
+ int nDoc = 0; /* Return value */ |
+ if( aList ){ |
+ char *aEnd = &aList[nList]; /* Pointer to one byte after EOF */ |
+ char *p = aList; /* Cursor */ |
+ if( !isPoslist ){ |
+ /* The number of docids in the list is the same as the number of |
+ ** varints. In FTS3 a varint consists of a single byte with the 0x80 |
+ ** bit cleared and zero or more bytes with the 0x80 bit set. So to |
+ ** count the varints in the buffer, just count the number of bytes |
+ ** with the 0x80 bit clear. */ |
+ while( p<aEnd ) nDoc += (((*p++)&0x80)==0); |
+ }else{ |
+ while( p<aEnd ){ |
+ nDoc++; |
+ while( (*p++)&0x80 ); /* Skip docid varint */ |
+ fts3PoslistCopy(0, &p); /* Skip over position list */ |
+ } |
+ } |
+ } |
-/* Orders TermData elements in strcmp fashion ( <0 for less-than, 0 |
-** for equal, >0 for greater-than). |
-*/ |
-static int termDataCmp(const void *av, const void *bv){ |
- const TermData *a = (const TermData *)av; |
- const TermData *b = (const TermData *)bv; |
- int n = a->nTerm<b->nTerm ? a->nTerm : b->nTerm; |
- int c = memcmp(a->pTerm, b->pTerm, n); |
- if( c!=0 ) return c; |
- return a->nTerm-b->nTerm; |
+ return nDoc; |
} |
-/* Order pTerms data by term, then write a new level 0 segment using |
-** LeafWriter. |
+/* |
+** Call sqlite3Fts3DeferToken() for each token in the expression pExpr. |
*/ |
-static int writeZeroSegment(fulltext_vtab *v, fts3Hash *pTerms){ |
- fts3HashElem *e; |
- int idx, rc, i, n; |
- TermData *pData; |
- LeafWriter writer; |
- DataBuffer dl; |
- |
- /* Determine the next index at level 0, merging as necessary. */ |
- rc = segdirNextIndex(v, 0, &idx); |
- if( rc!=SQLITE_OK ) return rc; |
- |
- n = fts3HashCount(pTerms); |
- pData = sqlite3_malloc(n*sizeof(TermData)); |
- |
- for(i = 0, e = fts3HashFirst(pTerms); e; i++, e = fts3HashNext(e)){ |
- assert( i<n ); |
- pData[i].pTerm = fts3HashKey(e); |
- pData[i].nTerm = fts3HashKeysize(e); |
- pData[i].pCollector = fts3HashData(e); |
- } |
- assert( i==n ); |
- |
- /* TODO(shess) Should we allow user-defined collation sequences, |
- ** here? I think we only need that once we support prefix searches. |
- */ |
- if( n>1 ) qsort(pData, n, sizeof(*pData), termDataCmp); |
- |
- /* TODO(shess) Refactor so that we can write directly to the segment |
- ** DataBuffer, as happens for segment merges. |
- */ |
- leafWriterInit(0, idx, &writer); |
- dataBufferInit(&dl, 0); |
- for(i=0; i<n; i++){ |
- dataBufferReset(&dl); |
- dlcAddDoclist(pData[i].pCollector, &dl); |
- rc = leafWriterStep(v, &writer, |
- pData[i].pTerm, pData[i].nTerm, dl.pData, dl.nData); |
- if( rc!=SQLITE_OK ) goto err; |
+static int fts3DeferExpression(Fts3Cursor *pCsr, Fts3Expr *pExpr){ |
+ int rc = SQLITE_OK; |
+ if( pExpr ){ |
+ rc = fts3DeferExpression(pCsr, pExpr->pLeft); |
+ if( rc==SQLITE_OK ){ |
+ rc = fts3DeferExpression(pCsr, pExpr->pRight); |
+ } |
+ if( pExpr->eType==FTSQUERY_PHRASE ){ |
+ int iCol = pExpr->pPhrase->iColumn; |
+ int i; |
+ for(i=0; rc==SQLITE_OK && i<pExpr->pPhrase->nToken; i++){ |
+ Fts3PhraseToken *pToken = &pExpr->pPhrase->aToken[i]; |
+ if( pToken->pDeferred==0 ){ |
+ rc = sqlite3Fts3DeferToken(pCsr, pToken, iCol); |
+ } |
+ } |
+ } |
} |
- rc = leafWriterFinalize(v, &writer); |
- |
- err: |
- dataBufferDestroy(&dl); |
- sqlite3_free(pData); |
- leafWriterDestroy(&writer); |
return rc; |
} |
-/* If pendingTerms has data, free it. */ |
-static int clearPendingTerms(fulltext_vtab *v){ |
- if( v->nPendingData>=0 ){ |
- fts3HashElem *e; |
- for(e=fts3HashFirst(&v->pendingTerms); e; e=fts3HashNext(e)){ |
- dlcDelete(fts3HashData(e)); |
+/* |
+** This function removes the position information from a doclist. When |
+** called, buffer aList (size *pnList bytes) contains a doclist that includes |
+** position information. This function removes the position information so |
+** that aList contains only docids, and adjusts *pnList to reflect the new |
+** (possibly reduced) size of the doclist. |
+*/ |
+static void fts3DoclistStripPositions( |
+ char *aList, /* IN/OUT: Buffer containing doclist */ |
+ int *pnList /* IN/OUT: Size of doclist in bytes */ |
+){ |
+ if( aList ){ |
+ char *aEnd = &aList[*pnList]; /* Pointer to one byte after EOF */ |
+ char *p = aList; /* Input cursor */ |
+ char *pOut = aList; /* Output cursor */ |
+ |
+ while( p<aEnd ){ |
+ sqlite3_int64 delta; |
+ p += sqlite3Fts3GetVarint(p, &delta); |
+ fts3PoslistCopy(0, &p); |
+ pOut += sqlite3Fts3PutVarint(pOut, delta); |
} |
- fts3HashClear(&v->pendingTerms); |
- v->nPendingData = -1; |
- } |
- return SQLITE_OK; |
-} |
-/* If pendingTerms has data, flush it to a level-zero segment, and |
-** free it. |
-*/ |
-static int flushPendingTerms(fulltext_vtab *v){ |
- if( v->nPendingData>=0 ){ |
- int rc = writeZeroSegment(v, &v->pendingTerms); |
- if( rc==SQLITE_OK ) clearPendingTerms(v); |
- return rc; |
+ *pnList = (int)(pOut - aList); |
} |
- return SQLITE_OK; |
} |
-/* If pendingTerms is "too big", or docid is out of order, flush it. |
-** Regardless, be certain that pendingTerms is initialized for use. |
-*/ |
-static int initPendingTerms(fulltext_vtab *v, sqlite_int64 iDocid){ |
- /* TODO(shess) Explore whether partially flushing the buffer on |
- ** forced-flush would provide better performance. I suspect that if |
- ** we ordered the doclists by size and flushed the largest until the |
- ** buffer was half empty, that would let the less frequent terms |
- ** generate longer doclists. |
+/* |
+** Return a DocList corresponding to the phrase *pPhrase. |
+** |
+** If this function returns SQLITE_OK, but *pnOut is set to a negative value, |
+** then no tokens in the phrase were looked up in the full-text index. This |
+** is only possible when this function is called from within xFilter(). The |
+** caller should assume that all documents match the phrase. The actual |
+** filtering will take place in xNext(). |
+*/ |
+static int fts3PhraseSelect( |
+ Fts3Cursor *pCsr, /* Virtual table cursor handle */ |
+ Fts3Phrase *pPhrase, /* Phrase to return a doclist for */ |
+ int isReqPos, /* True if output should contain positions */ |
+ char **paOut, /* OUT: Pointer to malloc'd result buffer */ |
+ int *pnOut /* OUT: Size of buffer at *paOut */ |
+){ |
+ char *pOut = 0; |
+ int nOut = 0; |
+ int rc = SQLITE_OK; |
+ int ii; |
+ int iCol = pPhrase->iColumn; |
+ int isTermPos = (pPhrase->nToken>1 || isReqPos); |
+ Fts3Table *p = (Fts3Table *)pCsr->base.pVtab; |
+ int isFirst = 1; |
+ |
+ int iPrevTok = 0; |
+ int nDoc = 0; |
+ |
+ /* If this is an xFilter() evaluation, create a segment-reader for each |
+ ** phrase token. Or, if this is an xNext() or snippet/offsets/matchinfo |
+ ** evaluation, only create segment-readers if there are no Fts3DeferredToken |
+ ** objects attached to the phrase-tokens. |
*/ |
- if( iDocid<=v->iPrevDocid || v->nPendingData>kPendingThreshold ){ |
- int rc = flushPendingTerms(v); |
- if( rc!=SQLITE_OK ) return rc; |
- } |
- if( v->nPendingData<0 ){ |
- fts3HashInit(&v->pendingTerms, FTS3_HASH_STRING, 1); |
- v->nPendingData = 0; |
+ for(ii=0; ii<pPhrase->nToken; ii++){ |
+ Fts3PhraseToken *pTok = &pPhrase->aToken[ii]; |
+ if( pTok->pSegcsr==0 ){ |
+ if( (pCsr->eEvalmode==FTS3_EVAL_FILTER) |
+ || (pCsr->eEvalmode==FTS3_EVAL_NEXT && pCsr->pDeferred==0) |
+ || (pCsr->eEvalmode==FTS3_EVAL_MATCHINFO && pTok->bFulltext) |
+ ){ |
+ rc = fts3TermSegReaderCursor( |
+ pCsr, pTok->z, pTok->n, pTok->isPrefix, &pTok->pSegcsr |
+ ); |
+ if( rc!=SQLITE_OK ) return rc; |
+ } |
+ } |
} |
- v->iPrevDocid = iDocid; |
- return SQLITE_OK; |
-} |
-/* This function implements the xUpdate callback; it is the top-level entry |
- * point for inserting, deleting or updating a row in a full-text table. */ |
-static int fulltextUpdate(sqlite3_vtab *pVtab, int nArg, sqlite3_value **ppArg, |
- sqlite_int64 *pRowid){ |
- fulltext_vtab *v = (fulltext_vtab *) pVtab; |
- int rc; |
+ for(ii=0; ii<pPhrase->nToken; ii++){ |
+ Fts3PhraseToken *pTok; /* Token to find doclist for */ |
+ int iTok = 0; /* The token being queried this iteration */ |
+ char *pList = 0; /* Pointer to token doclist */ |
+ int nList = 0; /* Size of buffer at pList */ |
- FTSTRACE(("FTS3 Update %p\n", pVtab)); |
+ /* Select a token to process. If this is an xFilter() call, then tokens |
+ ** are processed in order from least to most costly. Otherwise, tokens |
+ ** are processed in the order in which they occur in the phrase. |
+ */ |
+ if( pCsr->eEvalmode==FTS3_EVAL_MATCHINFO ){ |
+ assert( isReqPos ); |
+ iTok = ii; |
+ pTok = &pPhrase->aToken[iTok]; |
+ if( pTok->bFulltext==0 ) continue; |
+ }else if( pCsr->eEvalmode==FTS3_EVAL_NEXT || isReqPos ){ |
+ iTok = ii; |
+ pTok = &pPhrase->aToken[iTok]; |
+ }else{ |
+ int nMinCost = 0x7FFFFFFF; |
+ int jj; |
+ |
+ /* Find the remaining token with the lowest cost. */ |
+ for(jj=0; jj<pPhrase->nToken; jj++){ |
+ Fts3SegReaderCursor *pSegcsr = pPhrase->aToken[jj].pSegcsr; |
+ if( pSegcsr && pSegcsr->nCost<nMinCost ){ |
+ iTok = jj; |
+ nMinCost = pSegcsr->nCost; |
+ } |
+ } |
+ pTok = &pPhrase->aToken[iTok]; |
- if( nArg<2 ){ |
- rc = index_delete(v, sqlite3_value_int64(ppArg[0])); |
- if( rc==SQLITE_OK ){ |
- /* If we just deleted the last row in the table, clear out the |
- ** index data. |
+ /* This branch is taken if it is determined that loading the doclist |
+ ** for the next token would require more IO than loading all documents |
+ ** currently identified by doclist pOut/nOut. No further doclists will |
+ ** be loaded from the full-text index for this phrase. |
*/ |
- rc = content_exists(v); |
- if( rc==SQLITE_ROW ){ |
- rc = SQLITE_OK; |
- }else if( rc==SQLITE_DONE ){ |
- /* Clear the pending terms so we don't flush a useless level-0 |
- ** segment when the transaction closes. |
- */ |
- rc = clearPendingTerms(v); |
- if( rc==SQLITE_OK ){ |
- rc = segdir_delete_all(v); |
- } |
+ if( nMinCost>nDoc && ii>0 ){ |
+ rc = fts3DeferExpression(pCsr, pCsr->pExpr); |
+ break; |
} |
} |
- } else if( sqlite3_value_type(ppArg[0]) != SQLITE_NULL ){ |
- /* An update: |
- * ppArg[0] = old rowid |
- * ppArg[1] = new rowid |
- * ppArg[2..2+v->nColumn-1] = values |
- * ppArg[2+v->nColumn] = value for magic column (we ignore this) |
- * ppArg[2+v->nColumn+1] = value for docid |
- */ |
- sqlite_int64 rowid = sqlite3_value_int64(ppArg[0]); |
- if( sqlite3_value_type(ppArg[1]) != SQLITE_INTEGER || |
- sqlite3_value_int64(ppArg[1]) != rowid ){ |
- rc = SQLITE_ERROR; /* we don't allow changing the rowid */ |
- }else if( sqlite3_value_type(ppArg[2+v->nColumn+1]) != SQLITE_INTEGER || |
- sqlite3_value_int64(ppArg[2+v->nColumn+1]) != rowid ){ |
- rc = SQLITE_ERROR; /* we don't allow changing the docid */ |
+ |
+ if( pCsr->eEvalmode==FTS3_EVAL_NEXT && pTok->pDeferred ){ |
+ rc = fts3DeferredTermSelect(pTok->pDeferred, isTermPos, &nList, &pList); |
}else{ |
- assert( nArg==2+v->nColumn+2); |
- rc = index_update(v, rowid, &ppArg[2]); |
+ if( pTok->pSegcsr ){ |
+ rc = fts3TermSelect(p, pTok, iCol, isTermPos, &nList, &pList); |
+ } |
+ pTok->bFulltext = 1; |
} |
- } else { |
- /* An insert: |
- * ppArg[1] = requested rowid |
- * ppArg[2..2+v->nColumn-1] = values |
- * ppArg[2+v->nColumn] = value for magic column (we ignore this) |
- * ppArg[2+v->nColumn+1] = value for docid |
- */ |
- sqlite3_value *pRequestDocid = ppArg[2+v->nColumn+1]; |
- assert( nArg==2+v->nColumn+2); |
- if( SQLITE_NULL != sqlite3_value_type(pRequestDocid) && |
- SQLITE_NULL != sqlite3_value_type(ppArg[1]) ){ |
- /* TODO(shess) Consider allowing this to work if the values are |
- ** identical. I'm inclined to discourage that usage, though, |
- ** given that both rowid and docid are special columns. Better |
- ** would be to define one or the other as the default winner, |
- ** but should it be fts3-centric (docid) or SQLite-centric |
- ** (rowid)? |
- */ |
- rc = SQLITE_ERROR; |
+ assert( rc!=SQLITE_OK || pCsr->eEvalmode || pTok->pSegcsr==0 ); |
+ if( rc!=SQLITE_OK ) break; |
+ |
+ if( isFirst ){ |
+ pOut = pList; |
+ nOut = nList; |
+ if( pCsr->eEvalmode==FTS3_EVAL_FILTER && pPhrase->nToken>1 ){ |
+ nDoc = fts3DoclistCountDocids(1, pOut, nOut); |
+ } |
+ isFirst = 0; |
+ iPrevTok = iTok; |
}else{ |
- if( SQLITE_NULL == sqlite3_value_type(pRequestDocid) ){ |
- pRequestDocid = ppArg[1]; |
+ /* Merge the new term list and the current output. */ |
+ char *aLeft, *aRight; |
+ int nLeft, nRight; |
+ int nDist; |
+ int mt; |
+ |
+ /* If this is the final token of the phrase, and positions were not |
+ ** requested by the caller, use MERGE_PHRASE instead of POS_PHRASE. |
+ ** This drops the position information from the output list. |
+ */ |
+ mt = MERGE_POS_PHRASE; |
+ if( ii==pPhrase->nToken-1 && !isReqPos ) mt = MERGE_PHRASE; |
+ |
+ assert( iPrevTok!=iTok ); |
+ if( iPrevTok<iTok ){ |
+ aLeft = pOut; |
+ nLeft = nOut; |
+ aRight = pList; |
+ nRight = nList; |
+ nDist = iTok-iPrevTok; |
+ iPrevTok = iTok; |
+ }else{ |
+ aRight = pOut; |
+ nRight = nOut; |
+ aLeft = pList; |
+ nLeft = nList; |
+ nDist = iPrevTok-iTok; |
} |
- rc = index_insert(v, pRequestDocid, &ppArg[2], pRowid); |
+ pOut = aRight; |
+ fts3DoclistMerge( |
+ mt, nDist, 0, pOut, &nOut, aLeft, nLeft, aRight, nRight, &nDoc |
+ ); |
+ sqlite3_free(aLeft); |
} |
+ assert( nOut==0 || pOut!=0 ); |
} |
+ if( rc==SQLITE_OK ){ |
+ if( ii!=pPhrase->nToken ){ |
+ assert( pCsr->eEvalmode==FTS3_EVAL_FILTER && isReqPos==0 ); |
+ fts3DoclistStripPositions(pOut, &nOut); |
+ } |
+ *paOut = pOut; |
+ *pnOut = nOut; |
+ }else{ |
+ sqlite3_free(pOut); |
+ } |
return rc; |
} |
-static int fulltextSync(sqlite3_vtab *pVtab){ |
- FTSTRACE(("FTS3 xSync()\n")); |
- return flushPendingTerms((fulltext_vtab *)pVtab); |
-} |
+/* |
+** This function merges two doclists according to the requirements of a |
+** NEAR operator. |
+** |
+** Both input doclists must include position information. The output doclist |
+** includes position information if the first argument to this function |
+** is MERGE_POS_NEAR, or does not if it is MERGE_NEAR. |
+*/ |
+static int fts3NearMerge( |
+ int mergetype, /* MERGE_POS_NEAR or MERGE_NEAR */ |
+ int nNear, /* Parameter to NEAR operator */ |
+ int nTokenLeft, /* Number of tokens in LHS phrase arg */ |
+ char *aLeft, /* Doclist for LHS (incl. positions) */ |
+ int nLeft, /* Size of LHS doclist in bytes */ |
+ int nTokenRight, /* As nTokenLeft */ |
+ char *aRight, /* As aLeft */ |
+ int nRight, /* As nLeft */ |
+ char **paOut, /* OUT: Results of merge (malloced) */ |
+ int *pnOut /* OUT: Size of output buffer */ |
+){ |
+ char *aOut; /* Buffer to write output doclist to */ |
+ int rc; /* Return code */ |
-static int fulltextBegin(sqlite3_vtab *pVtab){ |
- fulltext_vtab *v = (fulltext_vtab *) pVtab; |
- FTSTRACE(("FTS3 xBegin()\n")); |
+ assert( mergetype==MERGE_POS_NEAR || mergetype==MERGE_NEAR ); |
- /* Any buffered updates should have been cleared by the previous |
- ** transaction. |
- */ |
- assert( v->nPendingData<0 ); |
- return clearPendingTerms(v); |
+ aOut = sqlite3_malloc(nLeft+nRight+1); |
+ if( aOut==0 ){ |
+ rc = SQLITE_NOMEM; |
+ }else{ |
+ rc = fts3DoclistMerge(mergetype, nNear+nTokenRight, nNear+nTokenLeft, |
+ aOut, pnOut, aLeft, nLeft, aRight, nRight, 0 |
+ ); |
+ if( rc!=SQLITE_OK ){ |
+ sqlite3_free(aOut); |
+ aOut = 0; |
+ } |
+ } |
+ |
+ *paOut = aOut; |
+ return rc; |
} |
-static int fulltextCommit(sqlite3_vtab *pVtab){ |
- fulltext_vtab *v = (fulltext_vtab *) pVtab; |
- FTSTRACE(("FTS3 xCommit()\n")); |
+/* |
+** This function is used as part of the processing for the snippet() and |
+** offsets() functions. |
+** |
+** Both pLeft and pRight are expression nodes of type FTSQUERY_PHRASE. Both |
+** have their respective doclists (including position information) loaded |
+** in Fts3Expr.aDoclist/nDoclist. This function removes all entries from |
+** each doclist that are not within nNear tokens of a corresponding entry |
+** in the other doclist. |
+*/ |
+int sqlite3Fts3ExprNearTrim(Fts3Expr *pLeft, Fts3Expr *pRight, int nNear){ |
+ int rc; /* Return code */ |
+ |
+ assert( pLeft->eType==FTSQUERY_PHRASE ); |
+ assert( pRight->eType==FTSQUERY_PHRASE ); |
+ assert( pLeft->isLoaded && pRight->isLoaded ); |
+ |
+ if( pLeft->aDoclist==0 || pRight->aDoclist==0 ){ |
+ sqlite3_free(pLeft->aDoclist); |
+ sqlite3_free(pRight->aDoclist); |
+ pRight->aDoclist = 0; |
+ pLeft->aDoclist = 0; |
+ rc = SQLITE_OK; |
+ }else{ |
+ char *aOut; /* Buffer in which to assemble new doclist */ |
+ int nOut; /* Size of buffer aOut in bytes */ |
- /* Buffered updates should have been cleared by fulltextSync(). */ |
- assert( v->nPendingData<0 ); |
- return clearPendingTerms(v); |
+ rc = fts3NearMerge(MERGE_POS_NEAR, nNear, |
+ pLeft->pPhrase->nToken, pLeft->aDoclist, pLeft->nDoclist, |
+ pRight->pPhrase->nToken, pRight->aDoclist, pRight->nDoclist, |
+ &aOut, &nOut |
+ ); |
+ if( rc!=SQLITE_OK ) return rc; |
+ sqlite3_free(pRight->aDoclist); |
+ pRight->aDoclist = aOut; |
+ pRight->nDoclist = nOut; |
+ |
+ rc = fts3NearMerge(MERGE_POS_NEAR, nNear, |
+ pRight->pPhrase->nToken, pRight->aDoclist, pRight->nDoclist, |
+ pLeft->pPhrase->nToken, pLeft->aDoclist, pLeft->nDoclist, |
+ &aOut, &nOut |
+ ); |
+ sqlite3_free(pLeft->aDoclist); |
+ pLeft->aDoclist = aOut; |
+ pLeft->nDoclist = nOut; |
+ } |
+ return rc; |
} |
-static int fulltextRollback(sqlite3_vtab *pVtab){ |
- FTSTRACE(("FTS3 xRollback()\n")); |
- return clearPendingTerms((fulltext_vtab *)pVtab); |
-} |
/* |
-** Implementation of the snippet() function for FTS3 |
-*/ |
-static void snippetFunc( |
- sqlite3_context *pContext, |
- int argc, |
- sqlite3_value **argv |
+** Allocate an Fts3SegReaderArray for each token in the expression pExpr. |
+** The allocated objects are stored in the Fts3PhraseToken.pArray member |
+** variables of each token structure. |
+*/ |
+static int fts3ExprAllocateSegReaders( |
+ Fts3Cursor *pCsr, /* FTS3 table */ |
+ Fts3Expr *pExpr, /* Expression to create seg-readers for */ |
+ int *pnExpr /* OUT: Number of AND'd expressions */ |
){ |
- fulltext_cursor *pCursor; |
- if( argc<1 ) return; |
- if( sqlite3_value_type(argv[0])!=SQLITE_BLOB || |
- sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){ |
- sqlite3_result_error(pContext, "illegal first argument to html_snippet",-1); |
- }else{ |
- const char *zStart = "<b>"; |
- const char *zEnd = "</b>"; |
- const char *zEllipsis = "<b>...</b>"; |
- memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor)); |
- if( argc>=2 ){ |
- zStart = (const char*)sqlite3_value_text(argv[1]); |
- if( argc>=3 ){ |
- zEnd = (const char*)sqlite3_value_text(argv[2]); |
- if( argc>=4 ){ |
- zEllipsis = (const char*)sqlite3_value_text(argv[3]); |
- } |
+ int rc = SQLITE_OK; /* Return code */ |
+ |
+ assert( pCsr->eEvalmode==FTS3_EVAL_FILTER ); |
+ if( pnExpr && pExpr->eType!=FTSQUERY_AND ){ |
+ (*pnExpr)++; |
+ pnExpr = 0; |
+ } |
+ |
+ if( pExpr->eType==FTSQUERY_PHRASE ){ |
+ Fts3Phrase *pPhrase = pExpr->pPhrase; |
+ int ii; |
+ |
+ for(ii=0; rc==SQLITE_OK && ii<pPhrase->nToken; ii++){ |
+ Fts3PhraseToken *pTok = &pPhrase->aToken[ii]; |
+ if( pTok->pSegcsr==0 ){ |
+ rc = fts3TermSegReaderCursor( |
+ pCsr, pTok->z, pTok->n, pTok->isPrefix, &pTok->pSegcsr |
+ ); |
} |
} |
- snippetAllOffsets(pCursor); |
- snippetText(pCursor, zStart, zEnd, zEllipsis); |
- sqlite3_result_text(pContext, pCursor->snippet.zSnippet, |
- pCursor->snippet.nSnippet, SQLITE_STATIC); |
+ }else{ |
+ rc = fts3ExprAllocateSegReaders(pCsr, pExpr->pLeft, pnExpr); |
+ if( rc==SQLITE_OK ){ |
+ rc = fts3ExprAllocateSegReaders(pCsr, pExpr->pRight, pnExpr); |
+ } |
} |
+ return rc; |
} |
/* |
-** Implementation of the offsets() function for FTS3 |
+** Free the Fts3SegReaderArray objects associated with each token in the |
+** expression pExpr. In other words, this function frees the resources |
+** allocated by fts3ExprAllocateSegReaders(). |
*/ |
-static void snippetOffsetsFunc( |
- sqlite3_context *pContext, |
- int argc, |
- sqlite3_value **argv |
-){ |
- fulltext_cursor *pCursor; |
- if( argc<1 ) return; |
- if( sqlite3_value_type(argv[0])!=SQLITE_BLOB || |
- sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){ |
- sqlite3_result_error(pContext, "illegal first argument to offsets",-1); |
- }else{ |
- memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor)); |
- snippetAllOffsets(pCursor); |
- snippetOffsetText(&pCursor->snippet); |
- sqlite3_result_text(pContext, |
- pCursor->snippet.zOffset, pCursor->snippet.nOffset, |
- SQLITE_STATIC); |
+static void fts3ExprFreeSegReaders(Fts3Expr *pExpr){ |
+ if( pExpr ){ |
+ Fts3Phrase *pPhrase = pExpr->pPhrase; |
+ if( pPhrase ){ |
+ int kk; |
+ for(kk=0; kk<pPhrase->nToken; kk++){ |
+ fts3SegReaderCursorFree(pPhrase->aToken[kk].pSegcsr); |
+ pPhrase->aToken[kk].pSegcsr = 0; |
+ } |
+ } |
+ fts3ExprFreeSegReaders(pExpr->pLeft); |
+ fts3ExprFreeSegReaders(pExpr->pRight); |
} |
} |
-/* OptLeavesReader is nearly identical to LeavesReader, except that |
-** where LeavesReader is geared towards the merging of complete |
-** segment levels (with exactly MERGE_COUNT segments), OptLeavesReader |
-** is geared towards implementation of the optimize() function, and |
-** can merge all segments simultaneously. This version may be |
-** somewhat less efficient than LeavesReader because it merges into an |
-** accumulator rather than doing an N-way merge, but since segment |
-** size grows exponentially (so segment count logrithmically) this is |
-** probably not an immediate problem. |
-*/ |
-/* TODO(shess): Prove that assertion, or extend the merge code to |
-** merge tree fashion (like the prefix-searching code does). |
-*/ |
-/* TODO(shess): OptLeavesReader and LeavesReader could probably be |
-** merged with little or no loss of performance for LeavesReader. The |
-** merged code would need to handle >MERGE_COUNT segments, and would |
-** also need to be able to optionally optimize away deletes. |
-*/ |
-typedef struct OptLeavesReader { |
- /* Segment number, to order readers by age. */ |
- int segment; |
- LeavesReader reader; |
-} OptLeavesReader; |
- |
-static int optLeavesReaderAtEnd(OptLeavesReader *pReader){ |
- return leavesReaderAtEnd(&pReader->reader); |
-} |
-static int optLeavesReaderTermBytes(OptLeavesReader *pReader){ |
- return leavesReaderTermBytes(&pReader->reader); |
-} |
-static const char *optLeavesReaderData(OptLeavesReader *pReader){ |
- return leavesReaderData(&pReader->reader); |
-} |
-static int optLeavesReaderDataBytes(OptLeavesReader *pReader){ |
- return leavesReaderDataBytes(&pReader->reader); |
-} |
-static const char *optLeavesReaderTerm(OptLeavesReader *pReader){ |
- return leavesReaderTerm(&pReader->reader); |
-} |
-static int optLeavesReaderStep(fulltext_vtab *v, OptLeavesReader *pReader){ |
- return leavesReaderStep(v, &pReader->reader); |
-} |
-static int optLeavesReaderTermCmp(OptLeavesReader *lr1, OptLeavesReader *lr2){ |
- return leavesReaderTermCmp(&lr1->reader, &lr2->reader); |
-} |
-/* Order by term ascending, segment ascending (oldest to newest), with |
-** exhausted readers to the end. |
-*/ |
-static int optLeavesReaderCmp(OptLeavesReader *lr1, OptLeavesReader *lr2){ |
- int c = optLeavesReaderTermCmp(lr1, lr2); |
- if( c!=0 ) return c; |
- return lr1->segment-lr2->segment; |
-} |
-/* Bubble pLr[0] to appropriate place in pLr[1..nLr-1]. Assumes that |
-** pLr[1..nLr-1] is already sorted. |
-*/ |
-static void optLeavesReaderReorder(OptLeavesReader *pLr, int nLr){ |
- while( nLr>1 && optLeavesReaderCmp(pLr, pLr+1)>0 ){ |
- OptLeavesReader tmp = pLr[0]; |
- pLr[0] = pLr[1]; |
- pLr[1] = tmp; |
- nLr--; |
- pLr++; |
+/* |
+** Return the sum of the costs of all tokens in the expression pExpr. This |
+** function must be called after Fts3SegReaderArrays have been allocated |
+** for all tokens using fts3ExprAllocateSegReaders(). |
+*/ |
+static int fts3ExprCost(Fts3Expr *pExpr){ |
+ int nCost; /* Return value */ |
+ if( pExpr->eType==FTSQUERY_PHRASE ){ |
+ Fts3Phrase *pPhrase = pExpr->pPhrase; |
+ int ii; |
+ nCost = 0; |
+ for(ii=0; ii<pPhrase->nToken; ii++){ |
+ Fts3SegReaderCursor *pSegcsr = pPhrase->aToken[ii].pSegcsr; |
+ if( pSegcsr ) nCost += pSegcsr->nCost; |
+ } |
+ }else{ |
+ nCost = fts3ExprCost(pExpr->pLeft) + fts3ExprCost(pExpr->pRight); |
} |
+ return nCost; |
} |
-/* optimize() helper function. Put the readers in order and iterate |
-** through them, merging doclists for matching terms into pWriter. |
-** Returns SQLITE_OK on success, or the SQLite error code which |
-** prevented success. |
-*/ |
-static int optimizeInternal(fulltext_vtab *v, |
- OptLeavesReader *readers, int nReaders, |
- LeafWriter *pWriter){ |
- int i, rc = SQLITE_OK; |
- DataBuffer doclist, merged, tmp; |
- const char *pData; |
- |
- /* Order the readers. */ |
- i = nReaders; |
- while( i-- > 0 ){ |
- optLeavesReaderReorder(&readers[i], nReaders-i); |
+/* |
+** The following is a helper function (and type) for fts3EvalExpr(). It |
+** must be called after Fts3SegReaders have been allocated for every token |
+** in the expression. See the context it is called from in fts3EvalExpr() |
+** for further explanation. |
+*/ |
+typedef struct ExprAndCost ExprAndCost; |
+struct ExprAndCost { |
+ Fts3Expr *pExpr; |
+ int nCost; |
+}; |
+static void fts3ExprAssignCosts( |
+ Fts3Expr *pExpr, /* Expression to assign costs to */ |
+ ExprAndCost **ppExprCost /* OUT: Write to *ppExprCost */ |
+){ |
+ if( pExpr->eType==FTSQUERY_AND ){ |
+ fts3ExprAssignCosts(pExpr->pLeft, ppExprCost); |
+ fts3ExprAssignCosts(pExpr->pRight, ppExprCost); |
+ }else{ |
+ (*ppExprCost)->pExpr = pExpr; |
+ (*ppExprCost)->nCost = fts3ExprCost(pExpr); |
+ (*ppExprCost)++; |
} |
+} |
- dataBufferInit(&doclist, LEAF_MAX); |
- dataBufferInit(&merged, LEAF_MAX); |
- |
- /* Exhausted readers bubble to the end, so when the first reader is |
- ** at eof, all are at eof. |
- */ |
- while( !optLeavesReaderAtEnd(&readers[0]) ){ |
- |
- /* Figure out how many readers share the next term. */ |
- for(i=1; i<nReaders && !optLeavesReaderAtEnd(&readers[i]); i++){ |
- if( 0!=optLeavesReaderTermCmp(&readers[0], &readers[i]) ) break; |
- } |
+/* |
+** Evaluate the full-text expression pExpr against FTS3 table pTab. Store |
+** the resulting doclist in *paOut and *pnOut. This routine mallocs for |
+** the space needed to store the output. The caller is responsible for |
+** freeing the space when it has finished. |
+** |
+** This function is called in two distinct contexts: |
+** |
+** * From within the virtual table xFilter() method. In this case, the |
+** output doclist contains entries for all rows in the table, based on |
+** data read from the full-text index. |
+** |
+** In this case, if the query expression contains one or more tokens that |
+** are very common, then the returned doclist may contain a superset of |
+** the documents that actually match the expression. |
+** |
+** * From within the virtual table xNext() method. This call is only made |
+** if the call from within xFilter() found that there were very common |
+** tokens in the query expression and did return a superset of the |
+** matching documents. In this case the returned doclist contains only |
+** entries that correspond to the current row of the table. Instead of |
+** reading the data for each token from the full-text index, the data is |
+** already available in-memory in the Fts3PhraseToken.pDeferred structures. |
+** See fts3EvalDeferred() for how it gets there. |
+** |
+** In the first case above, Fts3Cursor.doDeferred==0. In the second (if it is |
+** required) Fts3Cursor.doDeferred==1. |
+** |
+** If SQLite invokes the snippet(), offsets() or matchinfo() function |
+** as part of a SELECT on an FTS3 table, this function is called on each |
+** individual phrase expression in the query. If there were very common tokens |
+** found in the xFilter() call, then this function is called once per phrase |
+** for each row visited, and the returned doclist contains entries for the |
+** current row only. Otherwise, if there were no very common tokens, then this |
+** function is called once only for each phrase in the query and the returned |
+** doclist contains entries for all rows of the table. |
+** |
+** Fts3Cursor.doDeferred==1 when this function is called on phrases as a |
+** result of a snippet(), offsets() or matchinfo() invocation. |
+*/ |
+static int fts3EvalExpr( |
+ Fts3Cursor *p, /* Virtual table cursor handle */ |
+ Fts3Expr *pExpr, /* Parsed fts3 expression */ |
+ char **paOut, /* OUT: Pointer to malloc'd result buffer */ |
+ int *pnOut, /* OUT: Size of buffer at *paOut */ |
+ int isReqPos /* Require positions in output buffer */ |
+){ |
+ int rc = SQLITE_OK; /* Return code */ |
- pData = optLeavesReaderData(&readers[0]); |
- if( pData==NULL ){ |
- rc = SQLITE_CORRUPT_BKPT; |
- break; |
- } |
+ /* Zero the output parameters. */ |
+ *paOut = 0; |
+ *pnOut = 0; |
- /* Special-case for no merge. */ |
- if( i==1 ){ |
- /* Trim deletions from the doclist. */ |
- dataBufferReset(&merged); |
- rc = docListTrim(DL_DEFAULT, pData, |
- optLeavesReaderDataBytes(&readers[0]), |
- -1, DL_DEFAULT, &merged); |
- if( rc!=SQLITE_OK ) break; |
- }else{ |
- DLReader dlReaders[MERGE_COUNT]; |
- int iReader, nReaders; |
+ if( pExpr ){ |
+ assert( pExpr->eType==FTSQUERY_NEAR || pExpr->eType==FTSQUERY_OR |
+ || pExpr->eType==FTSQUERY_AND || pExpr->eType==FTSQUERY_NOT |
+ || pExpr->eType==FTSQUERY_PHRASE |
+ ); |
+ assert( pExpr->eType==FTSQUERY_PHRASE || isReqPos==0 ); |
- /* Prime the pipeline with the first reader's doclist. After |
- ** one pass index 0 will reference the accumulated doclist. |
- */ |
- rc = dlrInit(&dlReaders[0], DL_DEFAULT, |
- pData, |
- optLeavesReaderDataBytes(&readers[0])); |
- if( rc!=SQLITE_OK ) break; |
- iReader = 1; |
- |
- assert( iReader<i ); /* Must execute the loop at least once. */ |
- while( iReader<i ){ |
- /* Merge 16 inputs per pass. */ |
- for( nReaders=1; iReader<i && nReaders<MERGE_COUNT; |
- iReader++, nReaders++ ){ |
- pData = optLeavesReaderData(&readers[iReader]); |
- if( pData==NULL ){ |
- rc = SQLITE_CORRUPT_BKPT; |
+ if( pExpr->eType==FTSQUERY_PHRASE ){ |
+ rc = fts3PhraseSelect(p, pExpr->pPhrase, |
+ isReqPos || (pExpr->pParent && pExpr->pParent->eType==FTSQUERY_NEAR), |
+ paOut, pnOut |
+ ); |
+ fts3ExprFreeSegReaders(pExpr); |
+ }else if( p->eEvalmode==FTS3_EVAL_FILTER && pExpr->eType==FTSQUERY_AND ){ |
+ ExprAndCost *aExpr = 0; /* Array of AND'd expressions and costs */ |
+ int nExpr = 0; /* Size of aExpr[] */ |
+ char *aRet = 0; /* Doclist to return to caller */ |
+ int nRet = 0; /* Length of aRet[] in bytes */ |
+ int nDoc = 0x7FFFFFFF; |
+ |
+ assert( !isReqPos ); |
+ |
+ rc = fts3ExprAllocateSegReaders(p, pExpr, &nExpr); |
+ if( rc==SQLITE_OK ){ |
+ assert( nExpr>1 ); |
+ aExpr = sqlite3_malloc(sizeof(ExprAndCost) * nExpr); |
+ if( !aExpr ) rc = SQLITE_NOMEM; |
+ } |
+ if( rc==SQLITE_OK ){ |
+ int ii; /* Used to iterate through expressions */ |
+ |
+ fts3ExprAssignCosts(pExpr, &aExpr); |
+ aExpr -= nExpr; |
+ for(ii=0; ii<nExpr; ii++){ |
+ char *aNew; |
+ int nNew; |
+ int jj; |
+ ExprAndCost *pBest = 0; |
+ |
+ for(jj=0; jj<nExpr; jj++){ |
+ ExprAndCost *pCand = &aExpr[jj]; |
+ if( pCand->pExpr && (pBest==0 || pCand->nCost<pBest->nCost) ){ |
+ pBest = pCand; |
+ } |
+ } |
+ |
+ if( pBest->nCost>nDoc ){ |
+ rc = fts3DeferExpression(p, p->pExpr); |
break; |
+ }else{ |
+ rc = fts3EvalExpr(p, pBest->pExpr, &aNew, &nNew, 0); |
+ if( rc!=SQLITE_OK ) break; |
+ pBest->pExpr = 0; |
+ if( ii==0 ){ |
+ aRet = aNew; |
+ nRet = nNew; |
+ nDoc = fts3DoclistCountDocids(0, aRet, nRet); |
+ }else{ |
+ fts3DoclistMerge( |
+ MERGE_AND, 0, 0, aRet, &nRet, aRet, nRet, aNew, nNew, &nDoc |
+ ); |
+ sqlite3_free(aNew); |
+ } |
} |
- rc = dlrInit(&dlReaders[nReaders], DL_DEFAULT, pData, |
- optLeavesReaderDataBytes(&readers[iReader])); |
- if( rc!=SQLITE_OK ) break; |
- } |
- |
- /* Merge doclists and swap result into accumulator. */ |
- if( rc==SQLITE_OK ){ |
- dataBufferReset(&merged); |
- rc = docListMerge(&merged, dlReaders, nReaders); |
- tmp = merged; |
- merged = doclist; |
- doclist = tmp; |
} |
- |
- while( nReaders-- > 0 ){ |
- dlrDestroy(&dlReaders[nReaders]); |
- } |
- |
- if( rc!=SQLITE_OK ) goto err; |
- |
- /* Accumulated doclist to reader 0 for next pass. */ |
- rc = dlrInit(&dlReaders[0], DL_DEFAULT, doclist.pData, doclist.nData); |
- if( rc!=SQLITE_OK ) goto err; |
} |
- /* Destroy reader that was left in the pipeline. */ |
- dlrDestroy(&dlReaders[0]); |
+ if( rc==SQLITE_OK ){ |
+ *paOut = aRet; |
+ *pnOut = nRet; |
+ }else{ |
+ assert( *paOut==0 ); |
+ sqlite3_free(aRet); |
+ } |
+ sqlite3_free(aExpr); |
+ fts3ExprFreeSegReaders(pExpr); |
- /* Trim deletions from the doclist. */ |
- dataBufferReset(&merged); |
- rc = docListTrim(DL_DEFAULT, doclist.pData, doclist.nData, |
- -1, DL_DEFAULT, &merged); |
- if( rc!=SQLITE_OK ) goto err; |
- } |
+ }else{ |
+ char *aLeft; |
+ char *aRight; |
+ int nLeft; |
+ int nRight; |
+ |
+ assert( pExpr->eType==FTSQUERY_NEAR |
+ || pExpr->eType==FTSQUERY_OR |
+ || pExpr->eType==FTSQUERY_NOT |
+ || (pExpr->eType==FTSQUERY_AND && p->eEvalmode==FTS3_EVAL_NEXT) |
+ ); |
+ |
+ if( 0==(rc = fts3EvalExpr(p, pExpr->pRight, &aRight, &nRight, isReqPos)) |
+ && 0==(rc = fts3EvalExpr(p, pExpr->pLeft, &aLeft, &nLeft, isReqPos)) |
+ ){ |
+ switch( pExpr->eType ){ |
+ case FTSQUERY_NEAR: { |
+ Fts3Expr *pLeft; |
+ Fts3Expr *pRight; |
+ int mergetype = MERGE_NEAR; |
+ if( pExpr->pParent && pExpr->pParent->eType==FTSQUERY_NEAR ){ |
+ mergetype = MERGE_POS_NEAR; |
+ } |
+ pLeft = pExpr->pLeft; |
+ while( pLeft->eType==FTSQUERY_NEAR ){ |
+ pLeft=pLeft->pRight; |
+ } |
+ pRight = pExpr->pRight; |
+ assert( pRight->eType==FTSQUERY_PHRASE ); |
+ assert( pLeft->eType==FTSQUERY_PHRASE ); |
- /* Only pass doclists with hits (skip if all hits deleted). */ |
- if( merged.nData>0 ){ |
- rc = leafWriterStep(v, pWriter, |
- optLeavesReaderTerm(&readers[0]), |
- optLeavesReaderTermBytes(&readers[0]), |
- merged.pData, merged.nData); |
- if( rc!=SQLITE_OK ) goto err; |
- } |
+ rc = fts3NearMerge(mergetype, pExpr->nNear, |
+ pLeft->pPhrase->nToken, aLeft, nLeft, |
+ pRight->pPhrase->nToken, aRight, nRight, |
+ paOut, pnOut |
+ ); |
+ sqlite3_free(aLeft); |
+ break; |
+ } |
- /* Step merged readers to next term and reorder. */ |
- while( i-- > 0 ){ |
- rc = optLeavesReaderStep(v, &readers[i]); |
- if( rc!=SQLITE_OK ) goto err; |
+ case FTSQUERY_OR: { |
+ /* Allocate a buffer for the output. The maximum size is the |
+ ** sum of the sizes of the two input buffers. The +1 term is |
+ ** so that a buffer of zero bytes is never allocated - this can |
+ ** cause fts3DoclistMerge() to incorrectly return SQLITE_NOMEM. |
+ */ |
+ char *aBuffer = sqlite3_malloc(nRight+nLeft+1); |
+ rc = fts3DoclistMerge(MERGE_OR, 0, 0, aBuffer, pnOut, |
+ aLeft, nLeft, aRight, nRight, 0 |
+ ); |
+ *paOut = aBuffer; |
+ sqlite3_free(aLeft); |
+ break; |
+ } |
- optLeavesReaderReorder(&readers[i], nReaders-i); |
+ default: { |
+ assert( FTSQUERY_NOT==MERGE_NOT && FTSQUERY_AND==MERGE_AND ); |
+ fts3DoclistMerge(pExpr->eType, 0, 0, aLeft, pnOut, |
+ aLeft, nLeft, aRight, nRight, 0 |
+ ); |
+ *paOut = aLeft; |
+ break; |
+ } |
+ } |
+ } |
+ sqlite3_free(aRight); |
} |
} |
- err: |
- dataBufferDestroy(&doclist); |
- dataBufferDestroy(&merged); |
+ assert( rc==SQLITE_OK || *paOut==0 ); |
return rc; |
} |
-/* Implement optimize() function for FTS3. optimize(t) merges all |
-** segments in the fts index into a single segment. 't' is the magic |
-** table-named column. |
-*/ |
-static void optimizeFunc(sqlite3_context *pContext, |
- int argc, sqlite3_value **argv){ |
- fulltext_cursor *pCursor; |
- if( argc>1 ){ |
- sqlite3_result_error(pContext, "excess arguments to optimize()",-1); |
- }else if( sqlite3_value_type(argv[0])!=SQLITE_BLOB || |
- sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){ |
- sqlite3_result_error(pContext, "illegal first argument to optimize",-1); |
+/* |
+** This function is called from within xNext() for each row visited by |
+** an FTS3 query. If evaluating the FTS3 query expression within xFilter() |
+** was able to determine the exact set of matching rows, this function sets |
+** *pbRes to true and returns SQLITE_OK immediately. |
+** |
+** Otherwise, if evaluating the query expression within xFilter() returned a |
+** superset of the matching documents instead of an exact set (this happens |
+** when the query includes very common tokens and it is deemed too expensive to |
+** load their doclists from disk), this function tests if the current row |
+** really does match the FTS3 query. |
+** |
+** If an error occurs, an SQLite error code is returned. Otherwise, SQLITE_OK |
+** is returned and *pbRes is set to true if the current row matches the |
+** FTS3 query (and should be included in the results returned to SQLite), or |
+** false otherwise. |
+*/ |
+static int fts3EvalDeferred( |
+ Fts3Cursor *pCsr, /* FTS3 cursor pointing at row to test */ |
+ int *pbRes /* OUT: Set to true if row is a match */ |
+){ |
+ int rc = SQLITE_OK; |
+ if( pCsr->pDeferred==0 ){ |
+ *pbRes = 1; |
}else{ |
- fulltext_vtab *v; |
- int i, rc, iMaxLevel; |
- OptLeavesReader *readers; |
- int nReaders; |
- LeafWriter writer; |
- sqlite3_stmt *s; |
- |
- memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor)); |
- v = cursor_vtab(pCursor); |
- |
- /* Flush any buffered updates before optimizing. */ |
- rc = flushPendingTerms(v); |
- if( rc!=SQLITE_OK ) goto err; |
- |
- rc = segdir_count(v, &nReaders, &iMaxLevel); |
- if( rc!=SQLITE_OK ) goto err; |
- if( nReaders==0 || nReaders==1 ){ |
- sqlite3_result_text(pContext, "Index already optimal", -1, |
- SQLITE_STATIC); |
- return; |
+ rc = fts3CursorSeek(0, pCsr); |
+ if( rc==SQLITE_OK ){ |
+ sqlite3Fts3FreeDeferredDoclists(pCsr); |
+ rc = sqlite3Fts3CacheDeferredDoclists(pCsr); |
} |
+ if( rc==SQLITE_OK ){ |
+ char *a = 0; |
+ int n = 0; |
+ rc = fts3EvalExpr(pCsr, pCsr->pExpr, &a, &n, 0); |
+ assert( n>=0 ); |
+ *pbRes = (n>0); |
+ sqlite3_free(a); |
+ } |
+ } |
+ return rc; |
+} |
- rc = sql_get_statement(v, SEGDIR_SELECT_ALL_STMT, &s); |
- if( rc!=SQLITE_OK ) goto err; |
- |
- readers = sqlite3_malloc(nReaders*sizeof(readers[0])); |
- if( readers==NULL ) goto err; |
- |
- /* Note that there will already be a segment at this position |
- ** until we call segdir_delete() on iMaxLevel. |
- */ |
- leafWriterInit(iMaxLevel, 0, &writer); |
- |
- i = 0; |
- while( (rc = sqlite3_step(s))==SQLITE_ROW ){ |
- sqlite_int64 iStart = sqlite3_column_int64(s, 0); |
- sqlite_int64 iEnd = sqlite3_column_int64(s, 1); |
- const char *pRootData = sqlite3_column_blob(s, 2); |
- int nRootData = sqlite3_column_bytes(s, 2); |
- |
- /* Corrupt if we get back different types than we stored. */ |
- if( sqlite3_column_type(s, 0)!=SQLITE_INTEGER || |
- sqlite3_column_type(s, 1)!=SQLITE_INTEGER || |
- sqlite3_column_type(s, 2)!=SQLITE_BLOB ){ |
- rc = SQLITE_CORRUPT_BKPT; |
+/* |
+** Advance the cursor to the next row in the %_content table that |
+** matches the search criteria. For a MATCH search, this will be |
+** the next row that matches. For a full-table scan, this will be |
+** simply the next row in the %_content table. For a docid lookup, |
+** this routine simply sets the EOF flag. |
+** |
+** Return SQLITE_OK if nothing goes wrong. SQLITE_OK is returned |
+** even if we reach end-of-file. The fts3EofMethod() will be called |
+** subsequently to determine whether or not an EOF was hit. |
+*/ |
+static int fts3NextMethod(sqlite3_vtab_cursor *pCursor){ |
+ int res; |
+ int rc = SQLITE_OK; /* Return code */ |
+ Fts3Cursor *pCsr = (Fts3Cursor *)pCursor; |
+ |
+ pCsr->eEvalmode = FTS3_EVAL_NEXT; |
+ do { |
+ if( pCsr->aDoclist==0 ){ |
+ if( SQLITE_ROW!=sqlite3_step(pCsr->pStmt) ){ |
+ pCsr->isEof = 1; |
+ rc = sqlite3_reset(pCsr->pStmt); |
break; |
} |
- |
- assert( i<nReaders ); |
- rc = leavesReaderInit(v, -1, iStart, iEnd, pRootData, nRootData, |
- &readers[i].reader); |
- if( rc!=SQLITE_OK ) break; |
- |
- readers[i].segment = i; |
- i++; |
- } |
- |
- /* If we managed to successfully read them all, optimize them. */ |
- if( rc==SQLITE_DONE ){ |
- assert( i==nReaders ); |
- rc = optimizeInternal(v, readers, nReaders, &writer); |
+ pCsr->iPrevId = sqlite3_column_int64(pCsr->pStmt, 0); |
}else{ |
- sqlite3_reset(s); /* So we don't leave a lock. */ |
+ if( pCsr->pNextId>=&pCsr->aDoclist[pCsr->nDoclist] ){ |
+ pCsr->isEof = 1; |
+ break; |
+ } |
+ sqlite3_reset(pCsr->pStmt); |
+ fts3GetDeltaVarint(&pCsr->pNextId, &pCsr->iPrevId); |
+ pCsr->isRequireSeek = 1; |
+ pCsr->isMatchinfoNeeded = 1; |
} |
+ }while( SQLITE_OK==(rc = fts3EvalDeferred(pCsr, &res)) && res==0 ); |
- while( i-- > 0 ){ |
- leavesReaderDestroy(&readers[i].reader); |
- } |
- sqlite3_free(readers); |
+ return rc; |
+} |
- /* If we've successfully gotten to here, delete the old segments |
- ** and flush the interior structure of the new segment. |
- */ |
- if( rc==SQLITE_OK ){ |
- for( i=0; i<=iMaxLevel; i++ ){ |
- rc = segdir_delete(v, i); |
- if( rc!=SQLITE_OK ) break; |
+/* |
+** This is the xFilter interface for the virtual table. See |
+** the virtual table xFilter method documentation for additional |
+** information. |
+** |
+** If idxNum==FTS3_FULLSCAN_SEARCH then do a full table scan against |
+** the %_content table. |
+** |
+** If idxNum==FTS3_DOCID_SEARCH then do a docid lookup for a single entry |
+** in the %_content table. |
+** |
+** If idxNum>=FTS3_FULLTEXT_SEARCH then use the full text index. The |
+** column on the left-hand side of the MATCH operator is column |
+** number idxNum-FTS3_FULLTEXT_SEARCH, 0 indexed. argv[0] is the right-hand |
+** side of the MATCH operator. |
+*/ |
+static int fts3FilterMethod( |
+ sqlite3_vtab_cursor *pCursor, /* The cursor used for this query */ |
+ int idxNum, /* Strategy index */ |
+ const char *idxStr, /* Unused */ |
+ int nVal, /* Number of elements in apVal */ |
+ sqlite3_value **apVal /* Arguments for the indexing scheme */ |
+){ |
+ const char *azSql[] = { |
+ "SELECT %s FROM %Q.'%q_content' AS x WHERE docid = ?", /* non-full-scan */ |
+ "SELECT %s FROM %Q.'%q_content' AS x ", /* full-scan */ |
+ }; |
+ int rc; /* Return code */ |
+ char *zSql; /* SQL statement used to access %_content */ |
+ Fts3Table *p = (Fts3Table *)pCursor->pVtab; |
+ Fts3Cursor *pCsr = (Fts3Cursor *)pCursor; |
+ |
+ UNUSED_PARAMETER(idxStr); |
+ UNUSED_PARAMETER(nVal); |
+ |
+ assert( idxNum>=0 && idxNum<=(FTS3_FULLTEXT_SEARCH+p->nColumn) ); |
+ assert( nVal==0 || nVal==1 ); |
+ assert( (nVal==0)==(idxNum==FTS3_FULLSCAN_SEARCH) ); |
+ assert( p->pSegments==0 ); |
+ |
+ /* In case the cursor has been used before, clear it now. */ |
+ sqlite3_finalize(pCsr->pStmt); |
+ sqlite3_free(pCsr->aDoclist); |
+ sqlite3Fts3ExprFree(pCsr->pExpr); |
+ memset(&pCursor[1], 0, sizeof(Fts3Cursor)-sizeof(sqlite3_vtab_cursor)); |
+ |
+ if( idxNum!=FTS3_DOCID_SEARCH && idxNum!=FTS3_FULLSCAN_SEARCH ){ |
+ int iCol = idxNum-FTS3_FULLTEXT_SEARCH; |
+ const char *zQuery = (const char *)sqlite3_value_text(apVal[0]); |
+ |
+ if( zQuery==0 && sqlite3_value_type(apVal[0])!=SQLITE_NULL ){ |
+ return SQLITE_NOMEM; |
+ } |
+ |
+ rc = sqlite3Fts3ExprParse(p->pTokenizer, p->azColumn, p->nColumn, |
+ iCol, zQuery, -1, &pCsr->pExpr |
+ ); |
+ if( rc!=SQLITE_OK ){ |
+ if( rc==SQLITE_ERROR ){ |
+ p->base.zErrMsg = sqlite3_mprintf("malformed MATCH expression: [%s]", |
+ zQuery); |
} |
- |
- if( rc==SQLITE_OK ) rc = leafWriterFinalize(v, &writer); |
+ return rc; |
} |
- leafWriterDestroy(&writer); |
- |
- if( rc!=SQLITE_OK ) goto err; |
+ rc = sqlite3Fts3ReadLock(p); |
+ if( rc!=SQLITE_OK ) return rc; |
- sqlite3_result_text(pContext, "Index optimized", -1, SQLITE_STATIC); |
- return; |
+ rc = fts3EvalExpr(pCsr, pCsr->pExpr, &pCsr->aDoclist, &pCsr->nDoclist, 0); |
+ sqlite3Fts3SegmentsClose(p); |
+ if( rc!=SQLITE_OK ) return rc; |
+ pCsr->pNextId = pCsr->aDoclist; |
+ pCsr->iPrevId = 0; |
+ } |
- /* TODO(shess): Error-handling needs to be improved along the |
- ** lines of the dump_ functions. |
- */ |
- err: |
- { |
- char buf[512]; |
- sqlite3_snprintf(sizeof(buf), buf, "Error in optimize: %s", |
- sqlite3_errmsg(sqlite3_context_db_handle(pContext))); |
- sqlite3_result_error(pContext, buf, -1); |
- } |
+ /* Compile a SELECT statement for this cursor. For a full-table-scan, the |
+ ** statement loops through all rows of the %_content table. For a |
+ ** full-text query or docid lookup, the statement retrieves a single |
+ ** row by docid. |
+ */ |
+ zSql = (char *)azSql[idxNum==FTS3_FULLSCAN_SEARCH]; |
+ zSql = sqlite3_mprintf(zSql, p->zReadExprlist, p->zDb, p->zName); |
+ if( !zSql ){ |
+ rc = SQLITE_NOMEM; |
+ }else{ |
+ rc = sqlite3_prepare_v2(p->db, zSql, -1, &pCsr->pStmt, 0); |
+ sqlite3_free(zSql); |
+ } |
+ if( rc==SQLITE_OK && idxNum==FTS3_DOCID_SEARCH ){ |
+ rc = sqlite3_bind_value(pCsr->pStmt, 1, apVal[0]); |
} |
+ pCsr->eSearch = (i16)idxNum; |
+ |
+ if( rc!=SQLITE_OK ) return rc; |
+ return fts3NextMethod(pCursor); |
} |
-#ifdef SQLITE_TEST |
-/* Generate an error of the form "<prefix>: <msg>". If msg is NULL, |
-** pull the error from the context's db handle. |
+/* |
+** This is the xEof method of the virtual table. SQLite calls this |
+** routine to find out if it has reached the end of a result set. |
*/ |
-static void generateError(sqlite3_context *pContext, |
- const char *prefix, const char *msg){ |
- char buf[512]; |
- if( msg==NULL ) msg = sqlite3_errmsg(sqlite3_context_db_handle(pContext)); |
- sqlite3_snprintf(sizeof(buf), buf, "%s: %s", prefix, msg); |
- sqlite3_result_error(pContext, buf, -1); |
+static int fts3EofMethod(sqlite3_vtab_cursor *pCursor){ |
+ return ((Fts3Cursor *)pCursor)->isEof; |
} |
-/* Helper function to collect the set of terms in the segment into |
-** pTerms. The segment is defined by the leaf nodes between |
-** iStartBlockid and iEndBlockid, inclusive, or by the contents of |
-** pRootData if iStartBlockid is 0 (in which case the entire segment |
-** fit in a leaf). |
+/* |
+** This is the xRowid method. The SQLite core calls this routine to |
+** retrieve the rowid for the current row of the result set. fts3 |
+** exposes %_content.docid as the rowid for the virtual table. The |
+** rowid should be written to *pRowid. |
*/ |
-static int collectSegmentTerms(fulltext_vtab *v, sqlite3_stmt *s, |
- fts3Hash *pTerms){ |
- const sqlite_int64 iStartBlockid = sqlite3_column_int64(s, 0); |
- const sqlite_int64 iEndBlockid = sqlite3_column_int64(s, 1); |
- const char *pRootData = sqlite3_column_blob(s, 2); |
- const int nRootData = sqlite3_column_bytes(s, 2); |
- int rc; |
- LeavesReader reader; |
- |
- /* Corrupt if we get back different types than we stored. */ |
- if( sqlite3_column_type(s, 0)!=SQLITE_INTEGER || |
- sqlite3_column_type(s, 1)!=SQLITE_INTEGER || |
- sqlite3_column_type(s, 2)!=SQLITE_BLOB ){ |
- return SQLITE_CORRUPT_BKPT; |
+static int fts3RowidMethod(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){ |
+ Fts3Cursor *pCsr = (Fts3Cursor *) pCursor; |
+ if( pCsr->aDoclist ){ |
+ *pRowid = pCsr->iPrevId; |
+ }else{ |
+ /* This branch runs if the query is implemented using a full-table scan |
+ ** (not using the full-text index). In this case grab the rowid from the |
+ ** SELECT statement. |
+ */ |
+ assert( pCsr->isRequireSeek==0 ); |
+ *pRowid = sqlite3_column_int64(pCsr->pStmt, 0); |
} |
+ return SQLITE_OK; |
+} |
- rc = leavesReaderInit(v, 0, iStartBlockid, iEndBlockid, |
- pRootData, nRootData, &reader); |
- if( rc!=SQLITE_OK ) return rc; |
+/* |
+** This is the xColumn method, called by SQLite to request a value from |
+** the row that the supplied cursor currently points to. |
+*/ |
+static int fts3ColumnMethod( |
+ sqlite3_vtab_cursor *pCursor, /* Cursor to retrieve value from */ |
+ sqlite3_context *pContext, /* Context for sqlite3_result_xxx() calls */ |
+ int iCol /* Index of column to read value from */ |
+){ |
+ int rc; /* Return Code */ |
+ Fts3Cursor *pCsr = (Fts3Cursor *) pCursor; |
+ Fts3Table *p = (Fts3Table *)pCursor->pVtab; |
- while( rc==SQLITE_OK && !leavesReaderAtEnd(&reader) ){ |
- const char *pTerm = leavesReaderTerm(&reader); |
- const int nTerm = leavesReaderTermBytes(&reader); |
- void *oldValue = sqlite3Fts3HashFind(pTerms, pTerm, nTerm); |
- void *newValue = (void *)((char *)oldValue+1); |
+ /* The column value supplied by SQLite must be in range. */ |
+ assert( iCol>=0 && iCol<=p->nColumn+1 ); |
- /* From the comment before sqlite3Fts3HashInsert in fts3_hash.c, |
- ** the data value passed is returned in case of malloc failure. |
+ if( iCol==p->nColumn+1 ){ |
+ /* This call is a request for the "docid" column. Since "docid" is an |
+ ** alias for "rowid", use the xRowid() method to obtain the value. |
*/ |
- if( newValue==sqlite3Fts3HashInsert(pTerms, pTerm, nTerm, newValue) ){ |
- rc = SQLITE_NOMEM; |
- }else{ |
- rc = leavesReaderStep(v, &reader); |
+ sqlite3_int64 iRowid; |
+ rc = fts3RowidMethod(pCursor, &iRowid); |
+ sqlite3_result_int64(pContext, iRowid); |
+ }else if( iCol==p->nColumn ){ |
+ /* The extra column whose name is the same as the table. |
+ ** Return a blob which is a pointer to the cursor. |
+ */ |
+ sqlite3_result_blob(pContext, &pCsr, sizeof(pCsr), SQLITE_TRANSIENT); |
+ rc = SQLITE_OK; |
+ }else{ |
+ rc = fts3CursorSeek(0, pCsr); |
+ if( rc==SQLITE_OK ){ |
+ sqlite3_result_value(pContext, sqlite3_column_value(pCsr->pStmt, iCol+1)); |
} |
} |
- |
- leavesReaderDestroy(&reader); |
return rc; |
} |
-/* Helper function to build the result string for dump_terms(). */ |
-static int generateTermsResult(sqlite3_context *pContext, fts3Hash *pTerms){ |
- int iTerm, nTerms, nResultBytes, iByte; |
- char *result; |
- TermData *pData; |
- fts3HashElem *e; |
- |
- /* Iterate pTerms to generate an array of terms in pData for |
- ** sorting. |
- */ |
- nTerms = fts3HashCount(pTerms); |
- assert( nTerms>0 ); |
- pData = sqlite3_malloc(nTerms*sizeof(TermData)); |
- if( pData==NULL ) return SQLITE_NOMEM; |
- |
- nResultBytes = 0; |
- for(iTerm = 0, e = fts3HashFirst(pTerms); e; iTerm++, e = fts3HashNext(e)){ |
- nResultBytes += fts3HashKeysize(e)+1; /* Term plus trailing space */ |
- assert( iTerm<nTerms ); |
- pData[iTerm].pTerm = fts3HashKey(e); |
- pData[iTerm].nTerm = fts3HashKeysize(e); |
- pData[iTerm].pCollector = fts3HashData(e); /* unused */ |
- } |
- assert( iTerm==nTerms ); |
+/* |
+** This function is the implementation of the xUpdate callback used by |
+** FTS3 virtual tables. It is invoked by SQLite each time a row is to be |
+** inserted, updated or deleted. |
+*/ |
+static int fts3UpdateMethod( |
+ sqlite3_vtab *pVtab, /* Virtual table handle */ |
+ int nArg, /* Size of argument array */ |
+ sqlite3_value **apVal, /* Array of arguments */ |
+ sqlite_int64 *pRowid /* OUT: The affected (or effected) rowid */ |
+){ |
+ return sqlite3Fts3UpdateMethod(pVtab, nArg, apVal, pRowid); |
+} |
- assert( nResultBytes>0 ); /* nTerms>0, nResultsBytes must be, too. */ |
- result = sqlite3_malloc(nResultBytes); |
- if( result==NULL ){ |
- sqlite3_free(pData); |
- return SQLITE_NOMEM; |
- } |
+/* |
+** Implementation of xSync() method. Flush the contents of the pending-terms |
+** hash-table to the database. |
+*/ |
+static int fts3SyncMethod(sqlite3_vtab *pVtab){ |
+ int rc = sqlite3Fts3PendingTermsFlush((Fts3Table *)pVtab); |
+ sqlite3Fts3SegmentsClose((Fts3Table *)pVtab); |
+ return rc; |
+} |
- if( nTerms>1 ) qsort(pData, nTerms, sizeof(*pData), termDataCmp); |
+/* |
+** Implementation of xBegin() method. This is a no-op. |
+*/ |
+static int fts3BeginMethod(sqlite3_vtab *pVtab){ |
+ UNUSED_PARAMETER(pVtab); |
+ assert( ((Fts3Table *)pVtab)->nPendingData==0 ); |
+ return SQLITE_OK; |
+} |
- /* Read the terms in order to build the result. */ |
- iByte = 0; |
- for(iTerm=0; iTerm<nTerms; ++iTerm){ |
- memcpy(result+iByte, pData[iTerm].pTerm, pData[iTerm].nTerm); |
- iByte += pData[iTerm].nTerm; |
- result[iByte++] = ' '; |
- } |
- assert( iByte==nResultBytes ); |
- assert( result[nResultBytes-1]==' ' ); |
- result[nResultBytes-1] = '\0'; |
+/* |
+** Implementation of xCommit() method. This is a no-op. The contents of |
+** the pending-terms hash-table have already been flushed into the database |
+** by fts3SyncMethod(). |
+*/ |
+static int fts3CommitMethod(sqlite3_vtab *pVtab){ |
+ UNUSED_PARAMETER(pVtab); |
+ assert( ((Fts3Table *)pVtab)->nPendingData==0 ); |
+ return SQLITE_OK; |
+} |
- /* Passes away ownership of result. */ |
- sqlite3_result_text(pContext, result, nResultBytes-1, sqlite3_free); |
- sqlite3_free(pData); |
+/* |
+** Implementation of xRollback(). Discard the contents of the pending-terms |
+** hash-table. Any changes made to the database are reverted by SQLite. |
+*/ |
+static int fts3RollbackMethod(sqlite3_vtab *pVtab){ |
+ sqlite3Fts3PendingTermsClear((Fts3Table *)pVtab); |
return SQLITE_OK; |
} |
-/* Implements dump_terms() for use in inspecting the fts3 index from |
-** tests. TEXT result containing the ordered list of terms joined by |
-** spaces. dump_terms(t, level, idx) dumps the terms for the segment |
-** specified by level, idx (in %_segdir), while dump_terms(t) dumps |
-** all terms in the index. In both cases t is the fts table's magic |
-** table-named column. |
+/* |
+** Load the doclist associated with expression pExpr to pExpr->aDoclist. |
+** The loaded doclist contains positions as well as the document ids. |
+** This is used by the matchinfo(), snippet() and offsets() auxiliary |
+** functions. |
*/ |
-static void dumpTermsFunc( |
- sqlite3_context *pContext, |
- int argc, sqlite3_value **argv |
-){ |
- fulltext_cursor *pCursor; |
- if( argc!=3 && argc!=1 ){ |
- generateError(pContext, "dump_terms", "incorrect arguments"); |
- }else if( sqlite3_value_type(argv[0])!=SQLITE_BLOB || |
- sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){ |
- generateError(pContext, "dump_terms", "illegal first argument"); |
- }else{ |
- fulltext_vtab *v; |
- fts3Hash terms; |
- sqlite3_stmt *s = NULL; |
- int rc; |
+int sqlite3Fts3ExprLoadDoclist(Fts3Cursor *pCsr, Fts3Expr *pExpr){ |
+ int rc; |
+ assert( pExpr->eType==FTSQUERY_PHRASE && pExpr->pPhrase ); |
+ assert( pCsr->eEvalmode==FTS3_EVAL_NEXT ); |
+ rc = fts3EvalExpr(pCsr, pExpr, &pExpr->aDoclist, &pExpr->nDoclist, 1); |
+ return rc; |
+} |
- memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor)); |
- v = cursor_vtab(pCursor); |
+int sqlite3Fts3ExprLoadFtDoclist( |
+ Fts3Cursor *pCsr, |
+ Fts3Expr *pExpr, |
+ char **paDoclist, |
+ int *pnDoclist |
+){ |
+ int rc; |
+ assert( pCsr->eEvalmode==FTS3_EVAL_NEXT ); |
+ assert( pExpr->eType==FTSQUERY_PHRASE && pExpr->pPhrase ); |
+ pCsr->eEvalmode = FTS3_EVAL_MATCHINFO; |
+ rc = fts3EvalExpr(pCsr, pExpr, paDoclist, pnDoclist, 1); |
+ pCsr->eEvalmode = FTS3_EVAL_NEXT; |
+ return rc; |
+} |
- /* If passed only the cursor column, get all segments. Otherwise |
- ** get the segment described by the following two arguments. |
- */ |
- if( argc==1 ){ |
- rc = sql_get_statement(v, SEGDIR_SELECT_ALL_STMT, &s); |
- }else{ |
- rc = sql_get_statement(v, SEGDIR_SELECT_SEGMENT_STMT, &s); |
- if( rc==SQLITE_OK ){ |
- rc = sqlite3_bind_int(s, 1, sqlite3_value_int(argv[1])); |
- if( rc==SQLITE_OK ){ |
- rc = sqlite3_bind_int(s, 2, sqlite3_value_int(argv[2])); |
+/* |
+** After ExprLoadDoclist() (see above) has been called, this function is |
+** used to iterate/search through the position lists that make up the doclist |
+** stored in pExpr->aDoclist. |
+*/ |
+char *sqlite3Fts3FindPositions( |
+  Fts3Expr *pExpr,                /* Access this expression's doclist */ |
+ sqlite3_int64 iDocid, /* Docid associated with requested pos-list */ |
+ int iCol /* Column of requested pos-list */ |
+){ |
+ assert( pExpr->isLoaded ); |
+ if( pExpr->aDoclist ){ |
+ char *pEnd = &pExpr->aDoclist[pExpr->nDoclist]; |
+ char *pCsr; |
+ |
+ if( pExpr->pCurrent==0 ){ |
+ pExpr->pCurrent = pExpr->aDoclist; |
+ pExpr->iCurrent = 0; |
+ pExpr->pCurrent += sqlite3Fts3GetVarint(pExpr->pCurrent,&pExpr->iCurrent); |
+ } |
+ pCsr = pExpr->pCurrent; |
+ assert( pCsr ); |
+ |
+ while( pCsr<pEnd ){ |
+ if( pExpr->iCurrent<iDocid ){ |
+ fts3PoslistCopy(0, &pCsr); |
+ if( pCsr<pEnd ){ |
+ fts3GetDeltaVarint(&pCsr, &pExpr->iCurrent); |
+ } |
+ pExpr->pCurrent = pCsr; |
+ }else{ |
+ if( pExpr->iCurrent==iDocid ){ |
+ int iThis = 0; |
+ if( iCol<0 ){ |
+ /* If iCol is negative, return a pointer to the start of the |
+ ** position-list (instead of a pointer to the start of a list |
+ ** of offsets associated with a specific column). |
+ */ |
+ return pCsr; |
+ } |
+ while( iThis<iCol ){ |
+ fts3ColumnlistCopy(0, &pCsr); |
+ if( *pCsr==0x00 ) return 0; |
+ pCsr++; |
+ pCsr += sqlite3Fts3GetVarint32(pCsr, &iThis); |
+ } |
+ if( iCol==iThis && (*pCsr&0xFE) ) return pCsr; |
} |
+ return 0; |
} |
} |
+ } |
- if( rc!=SQLITE_OK ){ |
- generateError(pContext, "dump_terms", NULL); |
- return; |
- } |
- |
- /* Collect the terms for each segment. */ |
- sqlite3Fts3HashInit(&terms, FTS3_HASH_STRING, 1); |
- while( (rc = sqlite3_step(s))==SQLITE_ROW ){ |
- rc = collectSegmentTerms(v, s, &terms); |
- if( rc!=SQLITE_OK ) break; |
- } |
+ return 0; |
+} |
- if( rc!=SQLITE_DONE ){ |
- sqlite3_reset(s); |
- generateError(pContext, "dump_terms", NULL); |
- }else{ |
- const int nTerms = fts3HashCount(&terms); |
- if( nTerms>0 ){ |
- rc = generateTermsResult(pContext, &terms); |
- if( rc==SQLITE_NOMEM ){ |
- generateError(pContext, "dump_terms", "out of memory"); |
- }else{ |
- assert( rc==SQLITE_OK ); |
- } |
- }else if( argc==3 ){ |
- /* The specific segment asked for could not be found. */ |
- generateError(pContext, "dump_terms", "segment not found"); |
- }else{ |
- /* No segments found. */ |
- /* TODO(shess): It should be impossible to reach this. This |
- ** case can only happen for an empty table, in which case |
- ** SQLite has no rows to call this function on. |
- */ |
- sqlite3_result_null(pContext); |
- } |
- } |
- sqlite3Fts3HashClear(&terms); |
+/* |
+** Helper function used by the implementation of the overloaded snippet(), |
+** offsets() and optimize() SQL functions. |
+** |
+** If the value passed as the third argument is a blob of size |
+** sizeof(Fts3Cursor*), then the blob contents are copied to the |
+** output variable *ppCsr and SQLITE_OK is returned. Otherwise, an error |
+** message is written to context pContext and SQLITE_ERROR returned. The |
+** string passed via zFunc is used as part of the error message. |
+*/ |
+static int fts3FunctionArg( |
+ sqlite3_context *pContext, /* SQL function call context */ |
+ const char *zFunc, /* Function name */ |
+ sqlite3_value *pVal, /* argv[0] passed to function */ |
+ Fts3Cursor **ppCsr /* OUT: Store cursor handle here */ |
+){ |
+ Fts3Cursor *pRet; |
+ if( sqlite3_value_type(pVal)!=SQLITE_BLOB |
+ || sqlite3_value_bytes(pVal)!=sizeof(Fts3Cursor *) |
+ ){ |
+ char *zErr = sqlite3_mprintf("illegal first argument to %s", zFunc); |
+ sqlite3_result_error(pContext, zErr, -1); |
+ sqlite3_free(zErr); |
+ return SQLITE_ERROR; |
} |
+ memcpy(&pRet, sqlite3_value_blob(pVal), sizeof(Fts3Cursor *)); |
+ *ppCsr = pRet; |
+ return SQLITE_OK; |
} |
-/* Expand the DL_DEFAULT doclist in pData into a text result in |
-** pContext. |
+/* |
+** Implementation of the snippet() function for FTS3 |
*/ |
-static void createDoclistResult(sqlite3_context *pContext, |
- const char *pData, int nData){ |
- DataBuffer dump; |
- DLReader dlReader; |
- int rc; |
- |
- assert( pData!=NULL && nData>0 ); |
- |
- rc = dlrInit(&dlReader, DL_DEFAULT, pData, nData); |
- if( rc!=SQLITE_OK ) return rc; |
- dataBufferInit(&dump, 0); |
- for( ; rc==SQLITE_OK && !dlrAtEnd(&dlReader); rc = dlrStep(&dlReader) ){ |
- char buf[256]; |
- PLReader plReader; |
+static void fts3SnippetFunc( |
+ sqlite3_context *pContext, /* SQLite function call context */ |
+ int nVal, /* Size of apVal[] array */ |
+ sqlite3_value **apVal /* Array of arguments */ |
+){ |
+ Fts3Cursor *pCsr; /* Cursor handle passed through apVal[0] */ |
+ const char *zStart = "<b>"; |
+ const char *zEnd = "</b>"; |
+ const char *zEllipsis = "<b>...</b>"; |
+ int iCol = -1; |
+ int nToken = 15; /* Default number of tokens in snippet */ |
+ |
+ /* There must be at least one argument passed to this function (otherwise |
+ ** the non-overloaded version would have been called instead of this one). |
+ */ |
+ assert( nVal>=1 ); |
- rc = plrInit(&plReader, &dlReader); |
- if( rc!=SQLITE_OK ) break; |
- if( DL_DEFAULT==DL_DOCIDS || plrAtEnd(&plReader) ){ |
- sqlite3_snprintf(sizeof(buf), buf, "[%lld] ", dlrDocid(&dlReader)); |
- dataBufferAppend(&dump, buf, strlen(buf)); |
- }else{ |
- int iColumn = plrColumn(&plReader); |
- |
- sqlite3_snprintf(sizeof(buf), buf, "[%lld %d[", |
- dlrDocid(&dlReader), iColumn); |
- dataBufferAppend(&dump, buf, strlen(buf)); |
- |
- for( ; !plrAtEnd(&plReader); rc = plrStep(&plReader) ){ |
- if( rc!=SQLITE_OK ) break; |
- if( plrColumn(&plReader)!=iColumn ){ |
- iColumn = plrColumn(&plReader); |
- sqlite3_snprintf(sizeof(buf), buf, "] %d[", iColumn); |
- assert( dump.nData>0 ); |
- dump.nData--; /* Overwrite trailing space. */ |
- assert( dump.pData[dump.nData]==' '); |
- dataBufferAppend(&dump, buf, strlen(buf)); |
- } |
- if( DL_DEFAULT==DL_POSITIONS_OFFSETS ){ |
- sqlite3_snprintf(sizeof(buf), buf, "%d,%d,%d ", |
- plrPosition(&plReader), |
- plrStartOffset(&plReader), plrEndOffset(&plReader)); |
- }else if( DL_DEFAULT==DL_POSITIONS ){ |
- sqlite3_snprintf(sizeof(buf), buf, "%d ", plrPosition(&plReader)); |
- }else{ |
- assert( NULL=="Unhandled DL_DEFAULT value"); |
- } |
- dataBufferAppend(&dump, buf, strlen(buf)); |
- } |
- plrDestroy(&plReader); |
- if( rc!= SQLITE_OK ) break; |
+ if( nVal>6 ){ |
+ sqlite3_result_error(pContext, |
+ "wrong number of arguments to function snippet()", -1); |
+ return; |
+ } |
+ if( fts3FunctionArg(pContext, "snippet", apVal[0], &pCsr) ) return; |
- assert( dump.nData>0 ); |
- dump.nData--; /* Overwrite trailing space. */ |
- assert( dump.pData[dump.nData]==' '); |
- dataBufferAppend(&dump, "]] ", 3); |
- } |
+ switch( nVal ){ |
+ case 6: nToken = sqlite3_value_int(apVal[5]); |
+ case 5: iCol = sqlite3_value_int(apVal[4]); |
+ case 4: zEllipsis = (const char*)sqlite3_value_text(apVal[3]); |
+ case 3: zEnd = (const char*)sqlite3_value_text(apVal[2]); |
+ case 2: zStart = (const char*)sqlite3_value_text(apVal[1]); |
} |
- dlrDestroy(&dlReader); |
- if( rc!=SQLITE_OK ){ |
- dataBufferDestroy(&dump); |
- return rc; |
+ if( !zEllipsis || !zEnd || !zStart ){ |
+ sqlite3_result_error_nomem(pContext); |
+ }else if( SQLITE_OK==fts3CursorSeek(pContext, pCsr) ){ |
+ sqlite3Fts3Snippet(pContext, pCsr, zStart, zEnd, zEllipsis, iCol, nToken); |
} |
+} |
+ |
+/* |
+** Implementation of the offsets() function for FTS3 |
+*/ |
+static void fts3OffsetsFunc( |
+ sqlite3_context *pContext, /* SQLite function call context */ |
+ int nVal, /* Size of argument array */ |
+ sqlite3_value **apVal /* Array of arguments */ |
+){ |
+ Fts3Cursor *pCsr; /* Cursor handle passed through apVal[0] */ |
- assert( dump.nData>0 ); |
- dump.nData--; /* Overwrite trailing space. */ |
- assert( dump.pData[dump.nData]==' '); |
- dump.pData[dump.nData] = '\0'; |
- assert( dump.nData>0 ); |
+ UNUSED_PARAMETER(nVal); |
- /* Passes ownership of dump's buffer to pContext. */ |
- sqlite3_result_text(pContext, dump.pData, dump.nData, sqlite3_free); |
- dump.pData = NULL; |
- dump.nData = dump.nCapacity = 0; |
- return SQLITE_OK; |
+ assert( nVal==1 ); |
+ if( fts3FunctionArg(pContext, "offsets", apVal[0], &pCsr) ) return; |
+ assert( pCsr ); |
+ if( SQLITE_OK==fts3CursorSeek(pContext, pCsr) ){ |
+ sqlite3Fts3Offsets(pContext, pCsr); |
+ } |
} |
-/* Implements dump_doclist() for use in inspecting the fts3 index from |
-** tests. TEXT result containing a string representation of the |
-** doclist for the indicated term. dump_doclist(t, term, level, idx) |
-** dumps the doclist for term from the segment specified by level, idx |
-** (in %_segdir), while dump_doclist(t, term) dumps the logical |
-** doclist for the term across all segments. The per-segment doclist |
-** can contain deletions, while the full-index doclist will not |
-** (deletions are omitted). |
-** |
-** Result formats differ with the setting of DL_DEFAULTS. Examples: |
+/* |
+** Implementation of the special optimize() function for FTS3. This |
+** function merges all segments in the database to a single segment. |
+** Example usage is: |
** |
-** DL_DOCIDS: [1] [3] [7] |
-** DL_POSITIONS: [1 0[0 4] 1[17]] [3 1[5]] |
-** DL_POSITIONS_OFFSETS: [1 0[0,0,3 4,23,26] 1[17,102,105]] [3 1[5,20,23]] |
+** SELECT optimize(t) FROM t LIMIT 1; |
** |
-** In each case the number after the outer '[' is the docid. In the |
-** latter two cases, the number before the inner '[' is the column |
-** associated with the values within. For DL_POSITIONS the numbers |
-** within are the positions, for DL_POSITIONS_OFFSETS they are the |
-** position, the start offset, and the end offset. |
+** where 't' is the name of an FTS3 table. |
*/ |
-static void dumpDoclistFunc( |
- sqlite3_context *pContext, |
- int argc, sqlite3_value **argv |
+static void fts3OptimizeFunc( |
+ sqlite3_context *pContext, /* SQLite function call context */ |
+ int nVal, /* Size of argument array */ |
+ sqlite3_value **apVal /* Array of arguments */ |
){ |
- fulltext_cursor *pCursor; |
- if( argc!=2 && argc!=4 ){ |
- generateError(pContext, "dump_doclist", "incorrect arguments"); |
- }else if( sqlite3_value_type(argv[0])!=SQLITE_BLOB || |
- sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){ |
- generateError(pContext, "dump_doclist", "illegal first argument"); |
- }else if( sqlite3_value_text(argv[1])==NULL || |
- sqlite3_value_text(argv[1])[0]=='\0' ){ |
- generateError(pContext, "dump_doclist", "empty second argument"); |
- }else{ |
- const char *pTerm = (const char *)sqlite3_value_text(argv[1]); |
- const int nTerm = strlen(pTerm); |
- fulltext_vtab *v; |
- int rc; |
- DataBuffer doclist; |
- |
- memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor)); |
- v = cursor_vtab(pCursor); |
- |
- dataBufferInit(&doclist, 0); |
- |
- /* termSelect() yields the same logical doclist that queries are |
- ** run against. |
- */ |
- if( argc==2 ){ |
- rc = termSelect(v, v->nColumn, pTerm, nTerm, 0, DL_DEFAULT, &doclist); |
- }else{ |
- sqlite3_stmt *s = NULL; |
+ int rc; /* Return code */ |
+ Fts3Table *p; /* Virtual table handle */ |
+ Fts3Cursor *pCursor; /* Cursor handle passed through apVal[0] */ |
- /* Get our specific segment's information. */ |
- rc = sql_get_statement(v, SEGDIR_SELECT_SEGMENT_STMT, &s); |
- if( rc==SQLITE_OK ){ |
- rc = sqlite3_bind_int(s, 1, sqlite3_value_int(argv[2])); |
- if( rc==SQLITE_OK ){ |
- rc = sqlite3_bind_int(s, 2, sqlite3_value_int(argv[3])); |
- } |
- } |
- |
- if( rc==SQLITE_OK ){ |
- rc = sqlite3_step(s); |
+ UNUSED_PARAMETER(nVal); |
- if( rc==SQLITE_DONE ){ |
- dataBufferDestroy(&doclist); |
- generateError(pContext, "dump_doclist", "segment not found"); |
- return; |
- } |
+ assert( nVal==1 ); |
+ if( fts3FunctionArg(pContext, "optimize", apVal[0], &pCursor) ) return; |
+ p = (Fts3Table *)pCursor->base.pVtab; |
+ assert( p ); |
- /* Found a segment, load it into doclist. */ |
- if( rc==SQLITE_ROW ){ |
- const sqlite_int64 iLeavesEnd = sqlite3_column_int64(s, 1); |
- const char *pData = sqlite3_column_blob(s, 2); |
- const int nData = sqlite3_column_bytes(s, 2); |
- |
- /* loadSegment() is used by termSelect() to load each |
- ** segment's data. |
- */ |
- rc = loadSegment(v, pData, nData, iLeavesEnd, pTerm, nTerm, 0, |
- &doclist); |
- if( rc==SQLITE_OK ){ |
- rc = sqlite3_step(s); |
- |
- /* Should not have more than one matching segment. */ |
- if( rc!=SQLITE_DONE ){ |
- sqlite3_reset(s); |
- dataBufferDestroy(&doclist); |
- generateError(pContext, "dump_doclist", "invalid segdir"); |
- return; |
- } |
- rc = SQLITE_OK; |
- } |
- } |
- } |
+ rc = sqlite3Fts3Optimize(p); |
- sqlite3_reset(s); |
- } |
+ switch( rc ){ |
+ case SQLITE_OK: |
+ sqlite3_result_text(pContext, "Index optimized", -1, SQLITE_STATIC); |
+ break; |
+ case SQLITE_DONE: |
+ sqlite3_result_text(pContext, "Index already optimal", -1, SQLITE_STATIC); |
+ break; |
+ default: |
+ sqlite3_result_error_code(pContext, rc); |
+ break; |
+ } |
+} |
- if( rc==SQLITE_OK ){ |
- if( doclist.nData>0 ){ |
- createDoclistResult(pContext, doclist.pData, doclist.nData); |
- }else{ |
- /* TODO(shess): This can happen if the term is not present, or |
- ** if all instances of the term have been deleted and this is |
- ** an all-index dump. It may be interesting to distinguish |
- ** these cases. |
- */ |
- sqlite3_result_text(pContext, "", 0, SQLITE_STATIC); |
- } |
- }else if( rc==SQLITE_NOMEM ){ |
- /* Handle out-of-memory cases specially because if they are |
- ** generated in fts3 code they may not be reflected in the db |
- ** handle. |
- */ |
- /* TODO(shess): Handle this more comprehensively. |
- ** sqlite3ErrStr() has what I need, but is internal. |
- */ |
- generateError(pContext, "dump_doclist", "out of memory"); |
- }else{ |
- generateError(pContext, "dump_doclist", NULL); |
+/* |
+** Implementation of the matchinfo() function for FTS3 |
+*/ |
+static void fts3MatchinfoFunc( |
+ sqlite3_context *pContext, /* SQLite function call context */ |
+ int nVal, /* Size of argument array */ |
+ sqlite3_value **apVal /* Array of arguments */ |
+){ |
+ Fts3Cursor *pCsr; /* Cursor handle passed through apVal[0] */ |
+ assert( nVal==1 || nVal==2 ); |
+ if( SQLITE_OK==fts3FunctionArg(pContext, "matchinfo", apVal[0], &pCsr) ){ |
+ const char *zArg = 0; |
+ if( nVal>1 ){ |
+ zArg = (const char *)sqlite3_value_text(apVal[1]); |
} |
- |
- dataBufferDestroy(&doclist); |
+ sqlite3Fts3Matchinfo(pContext, pCsr, zArg); |
} |
} |
-#endif |
/* |
** This routine implements the xFindFunction method for the FTS3 |
** virtual table. |
*/ |
-static int fulltextFindFunction( |
- sqlite3_vtab *pVtab, |
- int nArg, |
- const char *zName, |
- void (**pxFunc)(sqlite3_context*,int,sqlite3_value**), |
- void **ppArg |
+static int fts3FindFunctionMethod( |
+ sqlite3_vtab *pVtab, /* Virtual table handle */ |
+ int nArg, /* Number of SQL function arguments */ |
+ const char *zName, /* Name of SQL function */ |
+ void (**pxFunc)(sqlite3_context*,int,sqlite3_value**), /* OUT: Result */ |
+ void **ppArg /* Unused */ |
){ |
- if( strcmp(zName,"snippet")==0 ){ |
- *pxFunc = snippetFunc; |
- return 1; |
- }else if( strcmp(zName,"offsets")==0 ){ |
- *pxFunc = snippetOffsetsFunc; |
- return 1; |
- }else if( strcmp(zName,"optimize")==0 ){ |
- *pxFunc = optimizeFunc; |
- return 1; |
-#ifdef SQLITE_TEST |
- /* NOTE(shess): These functions are present only for testing |
- ** purposes. No particular effort is made to optimize their |
- ** execution or how they build their results. |
- */ |
- }else if( strcmp(zName,"dump_terms")==0 ){ |
- /* fprintf(stderr, "Found dump_terms\n"); */ |
- *pxFunc = dumpTermsFunc; |
- return 1; |
- }else if( strcmp(zName,"dump_doclist")==0 ){ |
- /* fprintf(stderr, "Found dump_doclist\n"); */ |
- *pxFunc = dumpDoclistFunc; |
- return 1; |
-#endif |
+ struct Overloaded { |
+ const char *zName; |
+ void (*xFunc)(sqlite3_context*,int,sqlite3_value**); |
+ } aOverload[] = { |
+ { "snippet", fts3SnippetFunc }, |
+ { "offsets", fts3OffsetsFunc }, |
+ { "optimize", fts3OptimizeFunc }, |
+ { "matchinfo", fts3MatchinfoFunc }, |
+ }; |
+ int i; /* Iterator variable */ |
+ |
+ UNUSED_PARAMETER(pVtab); |
+ UNUSED_PARAMETER(nArg); |
+ UNUSED_PARAMETER(ppArg); |
+ |
+ for(i=0; i<SizeofArray(aOverload); i++){ |
+ if( strcmp(zName, aOverload[i].zName)==0 ){ |
+ *pxFunc = aOverload[i].xFunc; |
+ return 1; |
+ } |
} |
+ |
+ /* No function of the specified name was found. Return 0. */ |
return 0; |
} |
/* |
-** Rename an fts3 table. |
+** Implementation of FTS3 xRename method. Rename an fts3 table. |
*/ |
-static int fulltextRename( |
- sqlite3_vtab *pVtab, |
- const char *zName |
+static int fts3RenameMethod( |
+ sqlite3_vtab *pVtab, /* Virtual table handle */ |
+ const char *zName /* New name of table */ |
){ |
- fulltext_vtab *p = (fulltext_vtab *)pVtab; |
- int rc = SQLITE_NOMEM; |
- char *zSql = sqlite3_mprintf( |
- "ALTER TABLE %Q.'%q_content' RENAME TO '%q_content';" |
- "ALTER TABLE %Q.'%q_segments' RENAME TO '%q_segments';" |
- "ALTER TABLE %Q.'%q_segdir' RENAME TO '%q_segdir';" |
- , p->zDb, p->zName, zName |
- , p->zDb, p->zName, zName |
- , p->zDb, p->zName, zName |
+ Fts3Table *p = (Fts3Table *)pVtab; |
+ sqlite3 *db = p->db; /* Database connection */ |
+ int rc; /* Return Code */ |
+ |
+ rc = sqlite3Fts3PendingTermsFlush(p); |
+ if( rc!=SQLITE_OK ){ |
+ return rc; |
+ } |
+ |
+ fts3DbExec(&rc, db, |
+ "ALTER TABLE %Q.'%q_content' RENAME TO '%q_content';", |
+ p->zDb, p->zName, zName |
); |
- if( zSql ){ |
- rc = sqlite3_exec(p->db, zSql, 0, 0, 0); |
- sqlite3_free(zSql); |
+ if( p->bHasDocsize ){ |
+ fts3DbExec(&rc, db, |
+ "ALTER TABLE %Q.'%q_docsize' RENAME TO '%q_docsize';", |
+ p->zDb, p->zName, zName |
+ ); |
+ } |
+ if( p->bHasStat ){ |
+ fts3DbExec(&rc, db, |
+ "ALTER TABLE %Q.'%q_stat' RENAME TO '%q_stat';", |
+ p->zDb, p->zName, zName |
+ ); |
} |
+ fts3DbExec(&rc, db, |
+ "ALTER TABLE %Q.'%q_segments' RENAME TO '%q_segments';", |
+ p->zDb, p->zName, zName |
+ ); |
+ fts3DbExec(&rc, db, |
+ "ALTER TABLE %Q.'%q_segdir' RENAME TO '%q_segdir';", |
+ p->zDb, p->zName, zName |
+ ); |
return rc; |
} |
static const sqlite3_module fts3Module = { |
/* iVersion */ 0, |
- /* xCreate */ fulltextCreate, |
- /* xConnect */ fulltextConnect, |
- /* xBestIndex */ fulltextBestIndex, |
- /* xDisconnect */ fulltextDisconnect, |
- /* xDestroy */ fulltextDestroy, |
- /* xOpen */ fulltextOpen, |
- /* xClose */ fulltextClose, |
- /* xFilter */ fulltextFilter, |
- /* xNext */ fulltextNext, |
- /* xEof */ fulltextEof, |
- /* xColumn */ fulltextColumn, |
- /* xRowid */ fulltextRowid, |
- /* xUpdate */ fulltextUpdate, |
- /* xBegin */ fulltextBegin, |
- /* xSync */ fulltextSync, |
- /* xCommit */ fulltextCommit, |
- /* xRollback */ fulltextRollback, |
- /* xFindFunction */ fulltextFindFunction, |
- /* xRename */ fulltextRename, |
+ /* xCreate */ fts3CreateMethod, |
+ /* xConnect */ fts3ConnectMethod, |
+ /* xBestIndex */ fts3BestIndexMethod, |
+ /* xDisconnect */ fts3DisconnectMethod, |
+ /* xDestroy */ fts3DestroyMethod, |
+ /* xOpen */ fts3OpenMethod, |
+ /* xClose */ fts3CloseMethod, |
+ /* xFilter */ fts3FilterMethod, |
+ /* xNext */ fts3NextMethod, |
+ /* xEof */ fts3EofMethod, |
+ /* xColumn */ fts3ColumnMethod, |
+ /* xRowid */ fts3RowidMethod, |
+ /* xUpdate */ fts3UpdateMethod, |
+ /* xBegin */ fts3BeginMethod, |
+ /* xSync */ fts3SyncMethod, |
+ /* xCommit */ fts3CommitMethod, |
+ /* xRollback */ fts3RollbackMethod, |
+ /* xFindFunction */ fts3FindFunctionMethod, |
+ /* xRename */ fts3RenameMethod, |
}; |
+/* |
+** This function is registered as the module destructor (called when an |
+** FTS3 enabled database connection is closed). It frees the memory |
+** allocated for the tokenizer hash table. |
+*/ |
static void hashDestroy(void *p){ |
- fts3Hash *pHash = (fts3Hash *)p; |
+ Fts3Hash *pHash = (Fts3Hash *)p; |
sqlite3Fts3HashClear(pHash); |
sqlite3_free(pHash); |
} |
/* |
-** The fts3 built-in tokenizers - "simple" and "porter" - are implemented |
-** in files fts3_tokenizer1.c and fts3_porter.c respectively. The following |
-** two forward declarations are for functions declared in these files |
-** used to retrieve the respective implementations. |
+** The fts3 built-in tokenizers - "simple", "porter" and "icu" - are
+** implemented in files fts3_tokenizer1.c, fts3_porter.c and fts3_icu.c |
+** respectively. The following three forward declarations are for functions |
+** declared in these files used to retrieve the respective implementations. |
** |
** Calling sqlite3Fts3SimpleTokenizerModule() sets the value pointed |
-** to by the argument to point a the "simple" tokenizer implementation. |
-** Function ...PorterTokenizerModule() sets *pModule to point to the |
-** porter tokenizer/stemmer implementation. |
+** to by the argument to point to the "simple" tokenizer implementation. |
+** And so on. |
*/ |
void sqlite3Fts3SimpleTokenizerModule(sqlite3_tokenizer_module const**ppModule); |
void sqlite3Fts3PorterTokenizerModule(sqlite3_tokenizer_module const**ppModule); |
+#ifdef SQLITE_ENABLE_ICU |
void sqlite3Fts3IcuTokenizerModule(sqlite3_tokenizer_module const**ppModule); |
- |
-int sqlite3Fts3InitHashTable(sqlite3 *, fts3Hash *, const char *); |
+#endif |
/* |
** Initialise the fts3 extension. If this extension is built as part |
@@ -7336,19 +3607,23 @@ int sqlite3Fts3InitHashTable(sqlite3 *, fts3Hash *, const char *); |
*/ |
int sqlite3Fts3Init(sqlite3 *db){ |
int rc = SQLITE_OK; |
- fts3Hash *pHash = 0; |
+ Fts3Hash *pHash = 0; |
const sqlite3_tokenizer_module *pSimple = 0; |
const sqlite3_tokenizer_module *pPorter = 0; |
- const sqlite3_tokenizer_module *pIcu = 0; |
- sqlite3Fts3SimpleTokenizerModule(&pSimple); |
- sqlite3Fts3PorterTokenizerModule(&pPorter); |
#ifdef SQLITE_ENABLE_ICU |
+ const sqlite3_tokenizer_module *pIcu = 0; |
sqlite3Fts3IcuTokenizerModule(&pIcu); |
#endif |
+ rc = sqlite3Fts3InitAux(db); |
+ if( rc!=SQLITE_OK ) return rc; |
+ |
+ sqlite3Fts3SimpleTokenizerModule(&pSimple); |
+ sqlite3Fts3PorterTokenizerModule(&pPorter); |
+ |
/* Allocate and initialise the hash-table used to store tokenizers. */ |
- pHash = sqlite3_malloc(sizeof(fts3Hash)); |
+ pHash = sqlite3_malloc(sizeof(Fts3Hash)); |
if( !pHash ){ |
rc = SQLITE_NOMEM; |
}else{ |
@@ -7359,14 +3634,18 @@ int sqlite3Fts3Init(sqlite3 *db){ |
if( rc==SQLITE_OK ){ |
if( sqlite3Fts3HashInsert(pHash, "simple", 7, (void *)pSimple) |
|| sqlite3Fts3HashInsert(pHash, "porter", 7, (void *)pPorter) |
+#ifdef SQLITE_ENABLE_ICU |
|| (pIcu && sqlite3Fts3HashInsert(pHash, "icu", 4, (void *)pIcu)) |
+#endif |
){ |
rc = SQLITE_NOMEM; |
} |
} |
#ifdef SQLITE_TEST |
- sqlite3Fts3ExprInitTestInterface(db); |
+ if( rc==SQLITE_OK ){ |
+ rc = sqlite3Fts3ExprInitTestInterface(db); |
+ } |
#endif |
/* Create the virtual table wrapper around the hash-table and overload |
@@ -7380,16 +3659,24 @@ int sqlite3Fts3Init(sqlite3 *db){ |
&& SQLITE_OK==(rc = sqlite3Fts3InitHashTable(db, pHash, "fts3_tokenizer")) |
#endif |
&& SQLITE_OK==(rc = sqlite3_overload_function(db, "snippet", -1)) |
- && SQLITE_OK==(rc = sqlite3_overload_function(db, "offsets", -1)) |
- && SQLITE_OK==(rc = sqlite3_overload_function(db, "optimize", -1)) |
-#ifdef SQLITE_TEST |
- && SQLITE_OK==(rc = sqlite3_overload_function(db, "dump_terms", -1)) |
- && SQLITE_OK==(rc = sqlite3_overload_function(db, "dump_doclist", -1)) |
-#endif |
+ && SQLITE_OK==(rc = sqlite3_overload_function(db, "offsets", 1)) |
+ && SQLITE_OK==(rc = sqlite3_overload_function(db, "matchinfo", 1)) |
+ && SQLITE_OK==(rc = sqlite3_overload_function(db, "matchinfo", 2)) |
+ && SQLITE_OK==(rc = sqlite3_overload_function(db, "optimize", 1)) |
){ |
- return sqlite3_create_module_v2( |
+ rc = sqlite3_create_module_v2( |
db, "fts3", &fts3Module, (void *)pHash, hashDestroy |
); |
+#if CHROMIUM_FTS3_CHANGES && !SQLITE_TEST |
+ /* Disable fts4 pending review. */ |
+#else |
+ if( rc==SQLITE_OK ){ |
+ rc = sqlite3_create_module_v2( |
+ db, "fts4", &fts3Module, (void *)pHash, 0 |
+ ); |
+ } |
+#endif |
+ return rc; |
} |
/* An error has occurred. Delete the hash table and return the error code. */ |
@@ -7412,4 +3699,4 @@ int sqlite3_extension_init( |
} |
#endif |
-#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ |
+#endif |