/* fts2 has a design flaw which can lead to database corruption (see
** below). It is recommended not to use it any longer, instead use
** fts3 (or higher). If you believe that your use of fts2 is safe,
** add -DSQLITE_ENABLE_BROKEN_FTS2=1 to your CFLAGS.
*/
#if (!defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2)) \
        && !defined(SQLITE_ENABLE_BROKEN_FTS2)
#error fts2 has a design flaw and has been deprecated.
#endif
/* The flaw is that fts2 uses the content table's unaliased rowid as
** the unique docid. fts2 embeds the rowid in the index it builds,
** and expects the rowid to not change. The SQLite VACUUM operation
** will renumber such rowids, thereby breaking fts2. If you are using
** fts2 in a system which has disabled VACUUM, then you can continue
** to use it safely. Note that PRAGMA auto_vacuum does NOT disable
** VACUUM, though systems using auto_vacuum are unlikely to invoke
** VACUUM.
**
** Unlike fts1, which is safe across VACUUM if you never delete
** documents, fts2 has a second exposure to this flaw, in the segments
** table. So fts2 should be considered unsafe across VACUUM in all
** cases.
*/

/*
** 2006 Oct 10
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
**    May you do good and not evil.
**    May you find forgiveness for yourself and forgive others.
**    May you share freely, never taking more than you give.
**
******************************************************************************
**
** This is an SQLite module implementing full-text search.
*/

/*
** The code in this file is only compiled if:
**
**     * The FTS2 module is being built as an extension
**       (in which case SQLITE_CORE is not defined), or
**
**     * The FTS2 module is being built into the core of
**       SQLite (in which case SQLITE_ENABLE_FTS2 is defined).
*/

/* TODO(shess) Consider exporting this comment to an HTML file or the
** wiki.
*/
/* The full-text index is stored in a series of b+tree (-like)
** structures called segments which map terms to doclists. The
** structures are like b+trees in layout, but are constructed from the
** bottom up in optimal fashion and are not updatable. Since trees
** are built from the bottom up, things will be described from the
** bottom up.
**
**
**** Varints ****
** The basic unit of encoding is a variable-length integer called a
** varint. We encode variable-length integers in little-endian order
** using seven bits per byte as follows:
**
** KEY:
**   A = 0xxxxxxx    7 bits of data and one flag bit
**   B = 1xxxxxxx    7 bits of data and one flag bit
**
**   7 bits - A
**  14 bits - BA
**  21 bits - BBA
** and so on.
**
** This is identical to how sqlite encodes varints (see util.c).
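**
** As a worked example (illustrative, not from the original comment):
** the value 300 is binary 10 0101100; encoding the low-order seven
** bits first gives the two bytes 0xAC (0x2C with the continuation bit
** set) followed by 0x02.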
**
**
**** Document lists ****
** A doclist (document list) holds a docid-sorted list of hits for a
** given term. Doclists hold docids, and can optionally associate
** token positions and offsets with docids.
**
** A DL_POSITIONS_OFFSETS doclist is stored like this:
**
** array {
**   varint docid;
**   array {                 (position list for column 0)
**     varint position;      (delta from previous position plus POS_BASE)
**     varint startOffset;   (delta from previous startOffset)
**     varint endOffset;     (delta from startOffset)
**   }
**   array {
**     varint POS_COLUMN;    (marks start of position list for new column)
**     varint column;        (index of new column)
**     array {
**       varint position;    (delta from previous position plus POS_BASE)
**       varint startOffset; (delta from previous startOffset)
**       varint endOffset;   (delta from startOffset)
**     }
**   }
**   varint POS_END;         (marks end of positions for this document)
** }
**
** Here, array { X } means zero or more occurrences of X, adjacent in
** memory. A "position" is an index of a token in the token stream
** generated by the tokenizer, while an "offset" is a byte offset,
** both based at 0. Note that POS_END and POS_COLUMN occur in the
** same logical place as the position element, and act as sentinels
** ending a position list array.
**
** A DL_POSITIONS doclist omits the startOffset and endOffset
** information. A DL_DOCIDS doclist omits both the position and
** offset information, becoming an array of varint-encoded docids.
**
** On-disk data is stored as type DL_DEFAULT, so we don't serialize
** the type. Due to how deletion is implemented in the segmentation
** system, on-disk doclists MUST store at least positions.
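**
** As an illustrative example (using POS_END==0, POS_COLUMN==1, and
** POS_BASE==2 from the enum below): a DL_POSITIONS doclist for a term
** appearing in docid 5 at positions 1 and 4 of column 0 is the four
** bytes 05 03 05 00, i.e. varint(5), varint(2+1), varint(2+3),
** varint(POS_END).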
**
**
**** Segment leaf nodes ****
** Segment leaf nodes store terms and doclists, ordered by term. Leaf
** nodes are written using LeafWriter, and read using LeafReader (to
** iterate through a single leaf node's data) and LeavesReader (to
** iterate through a segment's entire leaf layer). Leaf nodes have
** the format:
**
** varint iHeight;             (height from leaf level, always 0)
** varint nTerm;               (length of first term)
** char pTerm[nTerm];          (content of first term)
** varint nDoclist;            (length of term's associated doclist)
** char pDoclist[nDoclist];    (content of doclist)
** array {
**                             (further terms are delta-encoded)
**   varint nPrefix;           (length of prefix shared with previous term)
**   varint nSuffix;           (length of unshared suffix)
**   char pTermSuffix[nSuffix];(unshared suffix of next term)
**   varint nDoclist;          (length of term's associated doclist)
**   char pDoclist[nDoclist];  (content of doclist)
** }
**
** Here, array { X } means zero or more occurrences of X, adjacent in
** memory.
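**
** For example (illustrative): if the previous term is "candid" and
** the next term is "candle", the next term is encoded as nPrefix=4,
** nSuffix=2, pTermSuffix="le".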
**
** Leaf nodes are broken into blocks which are stored contiguously in
** the %_segments table in sorted order. This means that when the end
** of a node is reached, the next term is in the node with the next
** greater node id.
**
** New data is spilled to a new leaf node when the current node
** exceeds LEAF_MAX bytes (default 2048). New data which itself is
** larger than STANDALONE_MIN (default 1024) is placed in a standalone
** node (a leaf node with a single term and doclist). The goal of
** these settings is to pack together groups of small doclists while
** making it efficient to directly access large doclists. The
** assumption is that large doclists represent terms which are more
** likely to be query targets.
**
** TODO(shess) It may be useful for blocking decisions to be more
** dynamic. For instance, it may make more sense to have a 2.5k leaf
** node rather than splitting into 2k and .5k nodes. My intuition is
** that this might extend through 2x or 4x the pagesize.
**
**
**** Segment interior nodes ****
** Segment interior nodes store blockids for subtree nodes and terms
** to describe what data is stored by each subtree. Interior
** nodes are written using InteriorWriter, and read using
** InteriorReader. InteriorWriters are created as needed when
** SegmentWriter creates new leaf nodes, or when an interior node
** itself grows too big and must be split. The format of interior
** nodes:
**
** varint iHeight;            (height from leaf level, always >0)
** varint iBlockid;           (block id of node's leftmost subtree)
** optional {
**   varint nTerm;            (length of first term)
**   char pTerm[nTerm];       (content of first term)
**   array {
**                            (further terms are delta-encoded)
**     varint nPrefix;        (length of shared prefix with previous term)
**     varint nSuffix;        (length of unshared suffix)
**     char pTermSuffix[nSuffix]; (unshared suffix of next term)
**   }
** }
**
** Here, optional { X } means an optional element, while array { X }
** means zero or more occurrences of X, adjacent in memory.
**
** An interior node encodes n terms separating n+1 subtrees. The
** subtree blocks are contiguous, so only the first subtree's blockid
** is encoded. The subtree at iBlockid will contain all terms less
** than the first term encoded (or all terms if no term is encoded).
** Otherwise, for terms greater than or equal to pTerm[i] but less
** than pTerm[i+1], the subtree for that term will be rooted at
** iBlockid+i. Interior nodes only store enough term data to
** distinguish adjacent children (if the rightmost term of the left
** child is "something", and the leftmost term of the right child is
** "wicked", only "w" is stored).
**
** New data is spilled to a new interior node at the same height when
** the current node exceeds INTERIOR_MAX bytes (default 2048).
** INTERIOR_MIN_TERMS (default 7) keeps large terms from monopolizing
** interior nodes and making the tree too skinny. The interior nodes
** at a given height are naturally tracked by interior nodes at
** height+1, and so on.
**
**
**** Segment directory ****
** The segment directory in table %_segdir stores meta-information for
** merging and deleting segments, and also the root node of the
** segment's tree.
**
** The root node is the top node of the segment's tree after encoding
** the entire segment, restricted to ROOT_MAX bytes (default 1024).
** This could be either a leaf node or an interior node. If the top
** node requires more than ROOT_MAX bytes, it is flushed to %_segments
** and a new root interior node is generated (which should always fit
** within ROOT_MAX because it only needs space for 2 varints, the
** height and the blockid of the previous root).
**
** The meta-information in the segment directory is:
**   level            - segment level (see below)
**   idx              - index within level
**                    - (level,idx uniquely identify a segment)
**   start_block      - first leaf node
**   leaves_end_block - last leaf node
**   end_block        - last block (including interior nodes)
**   root             - contents of root node
**
** If the root node is a leaf node, then start_block,
** leaves_end_block, and end_block are all 0.
**
**
**** Segment merging ****
** To amortize update costs, segments are grouped into levels and
** merged in batches. Each increase in level represents exponentially
** more documents.
**
** New documents (actually, document updates) are tokenized and
** written individually (using LeafWriter) to a level 0 segment, with
** incrementing idx. When idx reaches MERGE_COUNT (default 16), all
** level 0 segments are merged into a single level 1 segment. Level 1
** is populated like level 0, and eventually MERGE_COUNT level 1
** segments are merged to a single level 2 segment (representing
** MERGE_COUNT^2 updates), and so on.
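**
** For example, with the default MERGE_COUNT of 16 the 256th update
** cascades: it fills out the 16th level 0 segment, which merges into
** the 16th level 1 segment, which in turn merges into the first
** level 2 segment.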
**
** A segment merge traverses all segments at a given level in
** parallel, performing a straightforward sorted merge. Since segment
** leaf nodes are written into the %_segments table in order, this
** merge traverses the underlying sqlite disk structures efficiently.
** After the merge, all segment blocks from the merged level are
** deleted.
**
** MERGE_COUNT controls how often we merge segments. 16 seems to be
** somewhat of a sweet spot for insertion performance. 32 and 64 show
** very similar performance numbers to 16 on insertion, though they're
** a tiny bit slower (perhaps due to more overhead in merge-time
** sorting). 8 is about 20% slower than 16, 4 about 50% slower than
** 16, 2 about 66% slower than 16.
**
** At query time, high MERGE_COUNT increases the number of segments
** which need to be scanned and merged. For instance, with 100k docs
** inserted:
**
**    MERGE_COUNT   segments
**         16          25
**          8          12
**          4          10
**          2           6
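**
** (Illustrative aside: these counts are the digit sums of 100,000
** written in base MERGE_COUNT, e.g. 100,000 is 0x186A0 in base 16,
** and 1+8+6+10+0 = 25.)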
**
** This appears to have only a moderate impact on queries for very
** frequent terms (which are somewhat dominated by segment merge
** costs), and infrequent and non-existent terms still seem to be fast
** even with many segments.
**
** TODO(shess) That said, it would be nice to have a better query-side
** argument for MERGE_COUNT of 16. Also, it is possible/likely that
** optimizations to things like doclist merging will swing the sweet
** spot around.
**
**
**
**** Handling of deletions and updates ****
** Since we're using a segmented structure, with no docid-oriented
** index into the term index, we clearly cannot simply update the term
** index when a document is deleted or updated. For deletions, we
** write an empty doclist (varint(docid) varint(POS_END)); for updates,
** we simply write the new doclist. Segment merges overwrite older
** data for a particular docid with newer data, so deletes or updates
** will eventually overtake the earlier data and knock it out. The
** query logic likewise merges doclists so that newer data knocks out
** older data.
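**
** For example (illustrative): deleting docid 7 appends the two-byte
** empty doclist 07 00 (varint(7), varint(POS_END)) for the deleted
** document's terms; on merge, that newer empty doclist supersedes
** the older positional data for docid 7.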
**
** TODO(shess) Provide a VACUUM type operation to clear out all
** deletions and duplications. This would basically be a forced merge
** into a single segment.
*/

#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2)

#if defined(SQLITE_ENABLE_FTS2) && !defined(SQLITE_CORE)
# define SQLITE_CORE 1
#endif

#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <ctype.h>

#include "fts2.h"
#include "fts2_hash.h"
#include "fts2_tokenizer.h"
#include "sqlite3.h"
#include "sqlite3ext.h"
SQLITE_EXTENSION_INIT1


/* TODO(shess) MAN, this thing needs some refactoring. At minimum, it
** would be nice to order the file better, perhaps something along the
** lines of:
**
**  - utility functions
**  - table setup functions
**  - table update functions
**  - table query functions
**
** Put the query functions last because they're likely to reference
** typedefs or functions from the table update section.
*/

#if 0
# define TRACE(A) printf A; fflush(stdout)
#else
# define TRACE(A)
#endif

/* It is not safe to call isspace(), tolower(), or isalnum() on
** hi-bit-set characters. This is the same solution used in the
** tokenizer.
*/
/* TODO(shess) The snippet-generation code should be using the
** tokenizer-generated tokens rather than doing its own local
** tokenization.
*/
/* TODO(shess) Is __isascii() a portable version of (c&0x80)==0? */
static int safe_isspace(char c){
  return (c&0x80)==0 ? isspace(c) : 0;
}
static int safe_tolower(char c){
  return (c&0x80)==0 ? tolower(c) : c;
}
static int safe_isalnum(char c){
  return (c&0x80)==0 ? isalnum(c) : 0;
}

typedef enum DocListType {
  DL_DOCIDS,              /* docids only */
  DL_POSITIONS,           /* docids + positions */
  DL_POSITIONS_OFFSETS    /* docids + positions + offsets */
} DocListType;

/*
** By default, only positions and not offsets are stored in the doclists.
** To change this so that offsets are stored too, compile with
**
**          -DDL_DEFAULT=DL_POSITIONS_OFFSETS
**
** If DL_DEFAULT is set to DL_DOCIDS, your table can only be inserted
** into (no deletes or updates).
*/
#ifndef DL_DEFAULT
# define DL_DEFAULT DL_POSITIONS
#endif

enum {
  POS_END = 0,    /* end of this position list */
  POS_COLUMN,     /* followed by new column number */
  POS_BASE
};

/* MERGE_COUNT controls how often we merge segments (see comment at
** top of file).
*/
#define MERGE_COUNT 16

/* utility functions */

/* CLEAR() and SCRAMBLE() abstract memset() on a pointer to a single
** record to prevent errors of the form:
**
** my_function(SomeType *b){
**   memset(b, '\0', sizeof(b)); // sizeof(b)!=sizeof(*b)
** }
*/
/* TODO(shess) Obvious candidates for a header file. */
#define CLEAR(b) memset(b, '\0', sizeof(*(b)))

#ifndef NDEBUG
# define SCRAMBLE(b) memset(b, 0x55, sizeof(*(b)))
#else
# define SCRAMBLE(b)
#endif

/* We may need up to VARINT_MAX bytes to store an encoded 64-bit integer. */
#define VARINT_MAX 10

/* Write a 64-bit variable-length integer to memory starting at p[0].
 * The length of data written will be between 1 and VARINT_MAX bytes.
 * The number of bytes written is returned. */
static int putVarint(char *p, sqlite_int64 v){
  unsigned char *q = (unsigned char *) p;
  sqlite_uint64 vu = v;
  do{
    *q++ = (unsigned char) ((vu & 0x7f) | 0x80);
    vu >>= 7;
  }while( vu!=0 );
  q[-1] &= 0x7f;  /* turn off high bit in final byte */
  assert( q - (unsigned char *)p <= VARINT_MAX );
  return (int) (q - (unsigned char *)p);
}

/* Read a 64-bit variable-length integer from memory starting at p[0].
 * Return the number of bytes read, or 0 on error.
 * The value is stored in *v. */
static int getVarint(const char *p, sqlite_int64 *v){
  const unsigned char *q = (const unsigned char *) p;
  sqlite_uint64 x = 0, y = 1;
  while( (*q & 0x80) == 0x80 ){
    x += y * (*q++ & 0x7f);
    y <<= 7;
    if( q - (unsigned char *)p >= VARINT_MAX ){  /* bad data */
      assert( 0 );
      return 0;
    }
  }
  x += y * (*q++);
  *v = (sqlite_int64) x;
  return (int) (q - (unsigned char *)p);
}

static int getVarint32(const char *p, int *pi){
  sqlite_int64 i;
  int ret = getVarint(p, &i);
  *pi = (int) i;
  assert( *pi==i );
  return ret;
}

/*******************************************************************/
/* DataBuffer is used to collect data into a buffer in piecemeal
** fashion. It implements the usual distinction between amount of
** data currently stored (nData) and buffer capacity (nCapacity).
**
** dataBufferInit - create a buffer with given initial capacity.
** dataBufferReset - forget buffer's data, retaining capacity.
** dataBufferDestroy - free buffer's data.
** dataBufferSwap - swap contents of two buffers.
** dataBufferExpand - expand capacity without adding data.
** dataBufferAppend - append data.
** dataBufferAppend2 - append two pieces of data at once.
** dataBufferReplace - replace buffer's data.
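**
** Expected usage is something like (illustrative sketch):
**
**   DataBuffer buf;
**   dataBufferInit(&buf, 0);
**   dataBufferAppend(&buf, pData, nData);
**   // ... use buf.pData/buf.nData ...
**   dataBufferDestroy(&buf);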
*/
typedef struct DataBuffer {
  char *pData;          /* Pointer to malloc'ed buffer. */
  int nCapacity;        /* Size of pData buffer. */
  int nData;            /* End of data loaded into pData. */
} DataBuffer;

static void dataBufferInit(DataBuffer *pBuffer, int nCapacity){
  assert( nCapacity>=0 );
  pBuffer->nData = 0;
  pBuffer->nCapacity = nCapacity;
  pBuffer->pData = nCapacity==0 ? NULL : sqlite3_malloc(nCapacity);
}
static void dataBufferReset(DataBuffer *pBuffer){
  pBuffer->nData = 0;
}
static void dataBufferDestroy(DataBuffer *pBuffer){
  if( pBuffer->pData!=NULL ) sqlite3_free(pBuffer->pData);
  SCRAMBLE(pBuffer);
}
static void dataBufferSwap(DataBuffer *pBuffer1, DataBuffer *pBuffer2){
  DataBuffer tmp = *pBuffer1;
  *pBuffer1 = *pBuffer2;
  *pBuffer2 = tmp;
}
static void dataBufferExpand(DataBuffer *pBuffer, int nAddCapacity){
  assert( nAddCapacity>0 );
  /* TODO(shess) Consider expanding more aggressively. Note that the
  ** underlying malloc implementation may take care of such things for
  ** us already.
  */
  if( pBuffer->nData+nAddCapacity>pBuffer->nCapacity ){
    pBuffer->nCapacity = pBuffer->nData+nAddCapacity;
    pBuffer->pData = sqlite3_realloc(pBuffer->pData, pBuffer->nCapacity);
  }
}
static void dataBufferAppend(DataBuffer *pBuffer,
                             const char *pSource, int nSource){
  assert( nSource>0 && pSource!=NULL );
  dataBufferExpand(pBuffer, nSource);
  memcpy(pBuffer->pData+pBuffer->nData, pSource, nSource);
  pBuffer->nData += nSource;
}
static void dataBufferAppend2(DataBuffer *pBuffer,
                              const char *pSource1, int nSource1,
                              const char *pSource2, int nSource2){
  assert( nSource1>0 && pSource1!=NULL );
  assert( nSource2>0 && pSource2!=NULL );
  dataBufferExpand(pBuffer, nSource1+nSource2);
  memcpy(pBuffer->pData+pBuffer->nData, pSource1, nSource1);
  memcpy(pBuffer->pData+pBuffer->nData+nSource1, pSource2, nSource2);
  pBuffer->nData += nSource1+nSource2;
}
static void dataBufferReplace(DataBuffer *pBuffer,
                              const char *pSource, int nSource){
  dataBufferReset(pBuffer);
  dataBufferAppend(pBuffer, pSource, nSource);
}

/* StringBuffer is a null-terminated version of DataBuffer. */
typedef struct StringBuffer {
  DataBuffer b;            /* Includes null terminator. */
} StringBuffer;

static void initStringBuffer(StringBuffer *sb){
  dataBufferInit(&sb->b, 100);
  dataBufferReplace(&sb->b, "", 1);
}
static int stringBufferLength(StringBuffer *sb){
  return sb->b.nData-1;
}
static char *stringBufferData(StringBuffer *sb){
  return sb->b.pData;
}
static void stringBufferDestroy(StringBuffer *sb){
  dataBufferDestroy(&sb->b);
}

static void nappend(StringBuffer *sb, const char *zFrom, int nFrom){
  assert( sb->b.nData>0 );
  if( nFrom>0 ){
    sb->b.nData--;
    dataBufferAppend2(&sb->b, zFrom, nFrom, "", 1);
  }
}
static void append(StringBuffer *sb, const char *zFrom){
  nappend(sb, zFrom, strlen(zFrom));
}

/* Append a list of strings separated by commas. */
static void appendList(StringBuffer *sb, int nString, char **azString){
  int i;
  for(i=0; i<nString; ++i){
    if( i>0 ) append(sb, ", ");
    append(sb, azString[i]);
  }
}

static int endsInWhiteSpace(StringBuffer *p){
  return stringBufferLength(p)>0 &&
    safe_isspace(stringBufferData(p)[stringBufferLength(p)-1]);
}

/* If the StringBuffer ends in something other than white space, add a
** single space character to the end.
*/
static void appendWhiteSpace(StringBuffer *p){
  if( stringBufferLength(p)==0 ) return;
  if( !endsInWhiteSpace(p) ) append(p, " ");
}

/* Remove white space from the end of the StringBuffer */
static void trimWhiteSpace(StringBuffer *p){
  while( endsInWhiteSpace(p) ){
    p->b.pData[--p->b.nData-1] = '\0';
  }
}

/*******************************************************************/
/* DLReader is used to read document elements from a doclist. The
** current docid is cached, so dlrDocid() is fast. DLReader does not
** own the doclist buffer.
**
** dlrAtEnd - true if there's no more data to read.
** dlrDocid - docid of current document.
** dlrDocData - doclist data for current document (including docid).
** dlrDocDataBytes - length of same.
** dlrAllDataBytes - length of all remaining data.
** dlrPosData - position data for current document.
** dlrPosDataLen - length of pos data for current document (incl POS_END).
** dlrStep - step to the next document.
** dlrInit - initialize for a doclist of the given type over the given data.
** dlrDestroy - clean up.
**
** Expected usage is something like:
**
**   DLReader reader;
**   dlrInit(&reader, iType, pData, nData);
**   while( !dlrAtEnd(&reader) ){
**     // calls to dlrDocid() and kin.
**     dlrStep(&reader);
**   }
**   dlrDestroy(&reader);
*/
typedef struct DLReader {
  DocListType iType;
  const char *pData;
  int nData;

  sqlite_int64 iDocid;
  int nElement;
} DLReader;

static int dlrAtEnd(DLReader *pReader){
  assert( pReader->nData>=0 );
  return pReader->nData==0;
}
static sqlite_int64 dlrDocid(DLReader *pReader){
  assert( !dlrAtEnd(pReader) );
  return pReader->iDocid;
}
static const char *dlrDocData(DLReader *pReader){
  assert( !dlrAtEnd(pReader) );
  return pReader->pData;
}
static int dlrDocDataBytes(DLReader *pReader){
  assert( !dlrAtEnd(pReader) );
  return pReader->nElement;
}
static int dlrAllDataBytes(DLReader *pReader){
  assert( !dlrAtEnd(pReader) );
  return pReader->nData;
}
/* TODO(shess) Consider adding a field to track iDocid varint length
** to make these two functions faster. This might matter (a tiny bit)
** for queries.
*/
static const char *dlrPosData(DLReader *pReader){
  sqlite_int64 iDummy;
  int n = getVarint(pReader->pData, &iDummy);
  assert( !dlrAtEnd(pReader) );
  return pReader->pData+n;
}
static int dlrPosDataLen(DLReader *pReader){
  sqlite_int64 iDummy;
  int n = getVarint(pReader->pData, &iDummy);
  assert( !dlrAtEnd(pReader) );
  return pReader->nElement-n;
}
static void dlrStep(DLReader *pReader){
  assert( !dlrAtEnd(pReader) );

  /* Skip past current doclist element. */
  assert( pReader->nElement<=pReader->nData );
  pReader->pData += pReader->nElement;
  pReader->nData -= pReader->nElement;

  /* If there is more data, read the next doclist element. */
  if( pReader->nData!=0 ){
    sqlite_int64 iDocidDelta;
    int iDummy, n = getVarint(pReader->pData, &iDocidDelta);
    pReader->iDocid += iDocidDelta;
    if( pReader->iType>=DL_POSITIONS ){
      assert( n<pReader->nData );
      while( 1 ){
        n += getVarint32(pReader->pData+n, &iDummy);
        assert( n<=pReader->nData );
        if( iDummy==POS_END ) break;
        if( iDummy==POS_COLUMN ){
          n += getVarint32(pReader->pData+n, &iDummy);
          assert( n<pReader->nData );
        }else if( pReader->iType==DL_POSITIONS_OFFSETS ){
          n += getVarint32(pReader->pData+n, &iDummy);
          n += getVarint32(pReader->pData+n, &iDummy);
          assert( n<pReader->nData );
        }
      }
    }
    pReader->nElement = n;
    assert( pReader->nElement<=pReader->nData );
  }
}
static void dlrInit(DLReader *pReader, DocListType iType,
                    const char *pData, int nData){
  assert( pData!=NULL && nData!=0 );
  pReader->iType = iType;
  pReader->pData = pData;
  pReader->nData = nData;
  pReader->nElement = 0;
  pReader->iDocid = 0;

  /* Load the first element's data. There must be a first element. */
  dlrStep(pReader);
}
static void dlrDestroy(DLReader *pReader){
  SCRAMBLE(pReader);
}

#ifndef NDEBUG
/* Verify that the doclist can be validly decoded. Also returns the
** last docid found because it is convenient in other assertions for
** DLWriter.
*/
static void docListValidate(DocListType iType, const char *pData, int nData,
                            sqlite_int64 *pLastDocid){
  sqlite_int64 iPrevDocid = 0;
  assert( nData>0 );
  assert( pData!=0 );
  assert( pData+nData>pData );
  while( nData!=0 ){
    sqlite_int64 iDocidDelta;
    int n = getVarint(pData, &iDocidDelta);
    iPrevDocid += iDocidDelta;
    if( iType>DL_DOCIDS ){
      int iDummy;
      while( 1 ){
        n += getVarint32(pData+n, &iDummy);
        if( iDummy==POS_END ) break;
        if( iDummy==POS_COLUMN ){
          n += getVarint32(pData+n, &iDummy);
        }else if( iType>DL_POSITIONS ){
          n += getVarint32(pData+n, &iDummy);
          n += getVarint32(pData+n, &iDummy);
        }
        assert( n<=nData );
      }
    }
    assert( n<=nData );
    pData += n;
    nData -= n;
  }
  if( pLastDocid ) *pLastDocid = iPrevDocid;
}
#define ASSERT_VALID_DOCLIST(i, p, n, o) docListValidate(i, p, n, o)
#else
#define ASSERT_VALID_DOCLIST(i, p, n, o) assert( 1 )
#endif

/*******************************************************************/
/* DLWriter is used to write doclist data to a DataBuffer. DLWriter
** always appends to the buffer and does not own it.
**
** dlwInit - initialize to write a doclist of the given type to a buffer.
** dlwDestroy - clear the writer's memory. Does not free buffer.
** dlwAppend - append raw doclist data to buffer.
** dlwCopy - copy next doclist from reader to writer.
** dlwAdd - construct doclist element and append to buffer.
**    Only apply dlwAdd() to DL_DOCIDS doclists (else use PLWriter).
*/
typedef struct DLWriter {
  DocListType iType;
  DataBuffer *b;
  sqlite_int64 iPrevDocid;
#ifndef NDEBUG
  int has_iPrevDocid;
#endif
} DLWriter;

static void dlwInit(DLWriter *pWriter, DocListType iType, DataBuffer *b){
  pWriter->b = b;
  pWriter->iType = iType;
  pWriter->iPrevDocid = 0;
#ifndef NDEBUG
  pWriter->has_iPrevDocid = 0;
#endif
}
static void dlwDestroy(DLWriter *pWriter){
  SCRAMBLE(pWriter);
}
/* iFirstDocid is the first docid in the doclist in pData. It is
** needed because pData may point within a larger doclist, in which
** case the first item would be delta-encoded.
**
** iLastDocid is the final docid in the doclist in pData. It is
** needed to create the new iPrevDocid for future delta-encoding. The
** code could decode the passed doclist to recreate iLastDocid, but
** the only current user (docListMerge) already has decoded this
** information.
*/
/* TODO(shess) This has become just a helper for docListMerge.
** Consider a refactor to make this cleaner.
*/
static void dlwAppend(DLWriter *pWriter,
                      const char *pData, int nData,
                      sqlite_int64 iFirstDocid, sqlite_int64 iLastDocid){
  sqlite_int64 iDocid = 0;
  char c[VARINT_MAX];
  int nFirstOld, nFirstNew;     /* Old and new varint len of first docid. */
#ifndef NDEBUG
  sqlite_int64 iLastDocidDelta;
#endif

  /* Recode the initial docid as delta from iPrevDocid. */
  nFirstOld = getVarint(pData, &iDocid);
  assert( nFirstOld<nData || (nFirstOld==nData && pWriter->iType==DL_DOCIDS) );
  nFirstNew = putVarint(c, iFirstDocid-pWriter->iPrevDocid);

  /* Verify that the incoming doclist is valid AND that it ends with
  ** the expected docid. This is essential because we'll trust this
  ** docid in future delta-encoding.
  */
  ASSERT_VALID_DOCLIST(pWriter->iType, pData, nData, &iLastDocidDelta);
  assert( iLastDocid==iFirstDocid-iDocid+iLastDocidDelta );

  /* Append recoded initial docid and everything else. Rest of docids
  ** should have been delta-encoded from previous initial docid.
  */
  if( nFirstOld<nData ){
    dataBufferAppend2(pWriter->b, c, nFirstNew,
                      pData+nFirstOld, nData-nFirstOld);
  }else{
    dataBufferAppend(pWriter->b, c, nFirstNew);
  }
  pWriter->iPrevDocid = iLastDocid;
}
static void dlwCopy(DLWriter *pWriter, DLReader *pReader){
  dlwAppend(pWriter, dlrDocData(pReader), dlrDocDataBytes(pReader),
            dlrDocid(pReader), dlrDocid(pReader));
}
static void dlwAdd(DLWriter *pWriter, sqlite_int64 iDocid){
  char c[VARINT_MAX];
  int n = putVarint(c, iDocid-pWriter->iPrevDocid);

  /* Docids must ascend. */
  assert( !pWriter->has_iPrevDocid || iDocid>pWriter->iPrevDocid );
  assert( pWriter->iType==DL_DOCIDS );

  dataBufferAppend(pWriter->b, c, n);
  pWriter->iPrevDocid = iDocid;
#ifndef NDEBUG
  pWriter->has_iPrevDocid = 1;
#endif
}

/*******************************************************************/
/* PLReader is used to read data from a document's position list. As
** the caller steps through the list, data is cached so that varints
** only need to be decoded once.
**
** plrInit, plrDestroy - create/destroy a reader.
** plrColumn, plrPosition, plrStartOffset, plrEndOffset - accessors
** plrAtEnd - at end of stream, only call plrDestroy once true.
** plrStep - step to the next element.
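**
** Expected usage mirrors DLReader (illustrative sketch):
**
**   PLReader plReader;
**   plrInit(&plReader, &dlReader);
**   while( !plrAtEnd(&plReader) ){
**     // calls to plrColumn(), plrPosition(), and kin.
**     plrStep(&plReader);
**   }
**   plrDestroy(&plReader);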
*/
typedef struct PLReader {
  /* These refer to the next position's data. nData will reach 0 when
  ** reading the last position, so plrStep() signals EOF by setting
  ** pData to NULL.
  */
  const char *pData;
  int nData;

  DocListType iType;
  int iColumn;         /* the last column read */
  int iPosition;       /* the last position read */
  int iStartOffset;    /* the last start offset read */
  int iEndOffset;      /* the last end offset read */
} PLReader;

static int plrAtEnd(PLReader *pReader){
  return pReader->pData==NULL;
}
static int plrColumn(PLReader *pReader){
  assert( !plrAtEnd(pReader) );
  return pReader->iColumn;
}
static int plrPosition(PLReader *pReader){
  assert( !plrAtEnd(pReader) );
  return pReader->iPosition;
}
static int plrStartOffset(PLReader *pReader){
  assert( !plrAtEnd(pReader) );
  return pReader->iStartOffset;
}
static int plrEndOffset(PLReader *pReader){
  assert( !plrAtEnd(pReader) );
  return pReader->iEndOffset;
}
static void plrStep(PLReader *pReader){
  int i, n;

  assert( !plrAtEnd(pReader) );

  if( pReader->nData==0 ){
    pReader->pData = NULL;
    return;
  }

  n = getVarint32(pReader->pData, &i);
  if( i==POS_COLUMN ){
    n += getVarint32(pReader->pData+n, &pReader->iColumn);
    pReader->iPosition = 0;
    pReader->iStartOffset = 0;
    n += getVarint32(pReader->pData+n, &i);
  }
  /* Should never see adjacent column changes. */
  assert( i!=POS_COLUMN );

  if( i==POS_END ){
    pReader->nData = 0;
    pReader->pData = NULL;
    return;
  }

  pReader->iPosition += i-POS_BASE;
  if( pReader->iType==DL_POSITIONS_OFFSETS ){
    n += getVarint32(pReader->pData+n, &i);
    pReader->iStartOffset += i;
    n += getVarint32(pReader->pData+n, &i);
    pReader->iEndOffset = pReader->iStartOffset+i;
  }
  assert( n<=pReader->nData );
  pReader->pData += n;
  pReader->nData -= n;
}

static void plrInit(PLReader *pReader, DLReader *pDLReader){
  pReader->pData = dlrPosData(pDLReader);
  pReader->nData = dlrPosDataLen(pDLReader);
  pReader->iType = pDLReader->iType;
  pReader->iColumn = 0;
  pReader->iPosition = 0;
  pReader->iStartOffset = 0;
  pReader->iEndOffset = 0;
  plrStep(pReader);
}
static void plrDestroy(PLReader *pReader){
  SCRAMBLE(pReader);
}

/*******************************************************************/
/* PLWriter is used in constructing a document's position list. As a
** convenience, if iType is DL_DOCIDS, PLWriter becomes a no-op.
** PLWriter writes to the associated DLWriter's buffer.
**
** plwInit - init for writing a document's poslist.
** plwDestroy - clear a writer.
** plwAdd - append position and offset information.
** plwCopy - copy next position's data from reader to writer.
** plwTerminate - add any necessary doclist terminator.
**
** Calling plwAdd() after plwTerminate() may result in a corrupt
** doclist.
*/
/* TODO(shess) Until we've written the second item, we can cache the
** first item's information. Then we'd have three states:
**
** - initialized with docid, no positions.
** - docid and one position.
** - docid and multiple positions.
**
** Only the last state needs to actually write to dlw->b, which would
** be an improvement in the DLCollector case.
*/
typedef struct PLWriter {
  DLWriter *dlw;

  int iColumn;    /* the last column written */
  int iPos;       /* the last position written */
  int iOffset;    /* the last start offset written */
} PLWriter;

/* TODO(shess) In the case where the parent is reading these values
** from a PLReader, we could optimize to a copy if that PLReader has
** the same type as pWriter.
*/
static void plwAdd(PLWriter *pWriter, int iColumn, int iPos,
                   int iStartOffset, int iEndOffset){
  /* Worst-case space for POS_COLUMN, iColumn, iPosDelta,
  ** iStartOffsetDelta, and iEndOffsetDelta.
  */
  char c[5*VARINT_MAX];
  int n = 0;

  /* Ban plwAdd() after plwTerminate(). */
  assert( pWriter->iPos!=-1 );

  if( pWriter->dlw->iType==DL_DOCIDS ) return;

  if( iColumn!=pWriter->iColumn ){
    n += putVarint(c+n, POS_COLUMN);
    n += putVarint(c+n, iColumn);
    pWriter->iColumn = iColumn;
    pWriter->iPos = 0;
    pWriter->iOffset = 0;
  }
  assert( iPos>=pWriter->iPos );
  n += putVarint(c+n, POS_BASE+(iPos-pWriter->iPos));
  pWriter->iPos = iPos;
  if( pWriter->dlw->iType==DL_POSITIONS_OFFSETS ){
    assert( iStartOffset>=pWriter->iOffset );
    n += putVarint(c+n, iStartOffset-pWriter->iOffset);
    pWriter->iOffset = iStartOffset;
    assert( iEndOffset>=iStartOffset );
    n += putVarint(c+n, iEndOffset-iStartOffset);
  }
  dataBufferAppend(pWriter->dlw->b, c, n);
}
static void plwCopy(PLWriter *pWriter, PLReader *pReader){
  plwAdd(pWriter, plrColumn(pReader), plrPosition(pReader),
         plrStartOffset(pReader), plrEndOffset(pReader));
}
static void plwInit(PLWriter *pWriter, DLWriter *dlw, sqlite_int64 iDocid){
  char c[VARINT_MAX];
  int n;

  pWriter->dlw = dlw;

  /* Docids must ascend. */
  assert( !pWriter->dlw->has_iPrevDocid || iDocid>pWriter->dlw->iPrevDocid );
  n = putVarint(c, iDocid-pWriter->dlw->iPrevDocid);
  dataBufferAppend(pWriter->dlw->b, c, n);
  pWriter->dlw->iPrevDocid = iDocid;
#ifndef NDEBUG
  pWriter->dlw->has_iPrevDocid = 1;
#endif

  pWriter->iColumn = 0;
  pWriter->iPos = 0;
  pWriter->iOffset = 0;
}
/* TODO(shess) Should plwDestroy() also terminate the doclist? But
** then plwDestroy() would no longer be just a destructor, it would
** also be doing work, which isn't consistent with the overall idiom.
** Another option would be for plwAdd() to always append any necessary
** terminator, so that the output is always correct. But that would
** add incremental work to the common case with the only benefit being
** API elegance. Punt for now.
*/
static void plwTerminate(PLWriter *pWriter){
  if( pWriter->dlw->iType>DL_DOCIDS ){
    char c[VARINT_MAX];
    int n = putVarint(c, POS_END);
    dataBufferAppend(pWriter->dlw->b, c, n);
  }
#ifndef NDEBUG
  /* Mark as terminated for assert in plwAdd(). */
  pWriter->iPos = -1;
#endif
}
static void plwDestroy(PLWriter *pWriter){
  SCRAMBLE(pWriter);
}

/*******************************************************************/
/* DLCollector wraps PLWriter and DLWriter to provide a
** dynamically-allocated doclist area to use during tokenization.
**
** dlcNew - malloc up and initialize a collector.
** dlcDelete - destroy a collector and all contained items.
** dlcAddPos - append position and offset information.
** dlcAddDoclist - add the collected doclist to the given buffer.
** dlcNext - terminate the current document and open another.
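**
** Expected usage during tokenization is something like (illustrative
** sketch):
**
**   DLCollector *pCollector = dlcNew(iDocid, DL_DEFAULT);
**   // for each token: dlcAddPos(pCollector, iColumn, iPos, iStart, iEnd);
**   dlcAddDoclist(pCollector, &buffer);
**   dlcDelete(pCollector);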
*/
typedef struct DLCollector {
  DataBuffer b;
  DLWriter dlw;
  PLWriter plw;
} DLCollector;

/* TODO(shess) This could also be done by calling plwTerminate() and
** dataBufferAppend(). I tried that, expecting nominal performance
** differences, but it seemed to pretty reliably be worth 1% to code
** it this way. I suspect it is the incremental malloc overhead (some
** percentage of the plwTerminate() calls will cause a realloc), so
** this might be worth revisiting if the DataBuffer implementation
** changes.
*/
static void dlcAddDoclist(DLCollector *pCollector, DataBuffer *b){
  if( pCollector->dlw.iType>DL_DOCIDS ){
    char c[VARINT_MAX];
    int n = putVarint(c, POS_END);
    dataBufferAppend2(b, pCollector->b.pData, pCollector->b.nData, c, n);
  }else{
    dataBufferAppend(b, pCollector->b.pData, pCollector->b.nData);
  }
}
static void dlcNext(DLCollector *pCollector, sqlite_int64 iDocid){
  plwTerminate(&pCollector->plw);
  plwDestroy(&pCollector->plw);
  plwInit(&pCollector->plw, &pCollector->dlw, iDocid);
}
static void dlcAddPos(DLCollector *pCollector, int iColumn, int iPos,
                      int iStartOffset, int iEndOffset){
  plwAdd(&pCollector->plw, iColumn, iPos, iStartOffset, iEndOffset);
}

static DLCollector *dlcNew(sqlite_int64 iDocid, DocListType iType){
  DLCollector *pCollector = sqlite3_malloc(sizeof(DLCollector));
  dataBufferInit(&pCollector->b, 0);
  dlwInit(&pCollector->dlw, iType, &pCollector->b);
  plwInit(&pCollector->plw, &pCollector->dlw, iDocid);
  return pCollector;
}
static void dlcDelete(DLCollector *pCollector){
  plwDestroy(&pCollector->plw);
  dlwDestroy(&pCollector->dlw);
  dataBufferDestroy(&pCollector->b);
  SCRAMBLE(pCollector);
  sqlite3_free(pCollector);
}


/* Copy the doclist data of iType in pData/nData into *out, trimming
** unnecessary data as we go. Only columns matching iColumn are
** copied (all columns are copied if iColumn is -1). Elements with no
** matching columns are dropped. The output is an iOutType doclist.
*/
/* NOTE(shess) This code is only valid after all doclists are merged.
** If this is run before merges, then doclist items which represent
** deletion will be trimmed, and will thus not effect a deletion
** during the merge.
*/
static void docListTrim(DocListType iType, const char *pData, int nData,
                        int iColumn, DocListType iOutType, DataBuffer *out){
  DLReader dlReader;
  DLWriter dlWriter;

  assert( iOutType<=iType );

  dlrInit(&dlReader, iType, pData, nData);
  dlwInit(&dlWriter, iOutType, out);

  while( !dlrAtEnd(&dlReader) ){
    PLReader plReader;
    PLWriter plWriter;
    int match = 0;

    plrInit(&plReader, &dlReader);

    while( !plrAtEnd(&plReader) ){
      if( iColumn==-1 || plrColumn(&plReader)==iColumn ){
        if( !match ){
          plwInit(&plWriter, &dlWriter, dlrDocid(&dlReader));
          match = 1;
        }
        plwAdd(&plWriter, plrColumn(&plReader), plrPosition(&plReader),
               plrStartOffset(&plReader), plrEndOffset(&plReader));
      }
      plrStep(&plReader);
    }
    if( match ){
      plwTerminate(&plWriter);
      plwDestroy(&plWriter);
    }

    plrDestroy(&plReader);
    dlrStep(&dlReader);
  }
  dlwDestroy(&dlWriter);
  dlrDestroy(&dlReader);
}

/* Used by docListMerge() to keep doclists in ascending order by
** docid, then in ascending order by age (so the newest comes first).
*/
typedef struct OrderedDLReader {
  DLReader *pReader;

  /* TODO(shess) If we assume that docListMerge pReaders is ordered by
  ** age (which we do), then we could use pReader comparisons to break
  ** ties.
  */
  int idx;
} OrderedDLReader;

/* Order eof to end, then by docid asc, idx desc. */
static int orderedDLReaderCmp(OrderedDLReader *r1, OrderedDLReader *r2){
  if( dlrAtEnd(r1->pReader) ){
    if( dlrAtEnd(r2->pReader) ) return 0;  /* Both atEnd(). */
    return 1;                              /* Only r1 atEnd(). */
  }
  if( dlrAtEnd(r2->pReader) ) return -1;   /* Only r2 atEnd(). */

  if( dlrDocid(r1->pReader)<dlrDocid(r2->pReader) ) return -1;
  if( dlrDocid(r1->pReader)>dlrDocid(r2->pReader) ) return 1;

  /* Descending on idx. */
  return r2->idx-r1->idx;
}

/* Bubble p[0] to appropriate place in p[1..n-1]. Assumes that
** p[1..n-1] is already sorted.
*/
/* TODO(shess) Is this frequent enough to warrant a binary search?
** Before implementing that, instrument the code to check. In most
** current usage, I expect that p[0] will be less than p[1] a very
** high proportion of the time.
*/
static void orderedDLReaderReorder(OrderedDLReader *p, int n){
  while( n>1 && orderedDLReaderCmp(p, p+1)>0 ){
    OrderedDLReader tmp = p[0];
    p[0] = p[1];
    p[1] = tmp;
    n--;
    p++;
  }
}

/* Given an array of doclist readers, merge their doclist elements
** into out in sorted order (by docid), dropping elements from older
** readers when there is a duplicate docid. pReaders is assumed to be
** ordered by age, oldest first.
*/
/* TODO(shess) nReaders must be <= MERGE_COUNT. This should probably
** be fixed.
*/
static void docListMerge(DataBuffer *out,
                         DLReader *pReaders, int nReaders){
  OrderedDLReader readers[MERGE_COUNT];
  DLWriter writer;
  int i, n;
  const char *pStart = 0;
  int nStart = 0;
  sqlite_int64 iFirstDocid = 0, iLastDocid = 0;

  assert( nReaders>0 );
  if( nReaders==1 ){
    dataBufferAppend(out, dlrDocData(pReaders), dlrAllDataBytes(pReaders));
    return;
  }

  assert( nReaders<=MERGE_COUNT );
  n = 0;
  for(i=0; i<nReaders; i++){
    assert( pReaders[i].iType==pReaders[0].iType );
    readers[i].pReader = pReaders+i;
    readers[i].idx = i;
    n += dlrAllDataBytes(&pReaders[i]);
  }
  /* Conservatively size output to sum of inputs. Output should end
  ** up strictly smaller than input.
  */
  dataBufferExpand(out, n);

  /* Get the readers into sorted order. */
  while( i-->0 ){
    orderedDLReaderReorder(readers+i, nReaders-i);
  }

  dlwInit(&writer, pReaders[0].iType, out);
  while( !dlrAtEnd(readers[0].pReader) ){
    sqlite_int64 iDocid = dlrDocid(readers[0].pReader);

    /* If this is a continuation of the current buffer to copy, extend
    ** that buffer. memcpy() seems to be more efficient if it has
    ** a lot of data to copy.
    */
    if( dlrDocData(readers[0].pReader)==pStart+nStart ){
      nStart += dlrDocDataBytes(readers[0].pReader);
    }else{
      if( pStart!=0 ){
        dlwAppend(&writer, pStart, nStart, iFirstDocid, iLastDocid);
      }
      pStart = dlrDocData(readers[0].pReader);
      nStart = dlrDocDataBytes(readers[0].pReader);
      iFirstDocid = iDocid;
    }
    iLastDocid = iDocid;
    dlrStep(readers[0].pReader);

    /* Drop all of the older elements with the same docid. */
    for(i=1; i<nReaders &&
             !dlrAtEnd(readers[i].pReader) &&
             dlrDocid(readers[i].pReader)==iDocid; i++){
      dlrStep(readers[i].pReader);
    }

    /* Get the readers back into order. */
    while( i-->0 ){
      orderedDLReaderReorder(readers+i, nReaders-i);
    }
  }

  /* Copy over any remaining elements. */
  if( nStart>0 ) dlwAppend(&writer, pStart, nStart, iFirstDocid, iLastDocid);
  dlwDestroy(&writer);
}

/* Helper function for posListUnion(). Compares the current position
** of left against right, returning, per the standard C idiom, <0 if
** left<right, >0 if left>right, and 0 if left==right. "End" always
** compares greater.
*/
static int posListCmp(PLReader *pLeft, PLReader *pRight){
  assert( pLeft->iType==pRight->iType );
  if( pLeft->iType==DL_DOCIDS ) return 0;

  if( plrAtEnd(pLeft) ) return plrAtEnd(pRight) ? 0 : 1;
  if( plrAtEnd(pRight) ) return -1;

  if( plrColumn(pLeft)<plrColumn(pRight) ) return -1;
  if( plrColumn(pLeft)>plrColumn(pRight) ) return 1;

  if( plrPosition(pLeft)<plrPosition(pRight) ) return -1;
  if( plrPosition(pLeft)>plrPosition(pRight) ) return 1;
  if( pLeft->iType==DL_POSITIONS ) return 0;

  if( plrStartOffset(pLeft)<plrStartOffset(pRight) ) return -1;
  if( plrStartOffset(pLeft)>plrStartOffset(pRight) ) return 1;

  if( plrEndOffset(pLeft)<plrEndOffset(pRight) ) return -1;
  if( plrEndOffset(pLeft)>plrEndOffset(pRight) ) return 1;

  return 0;
}

/* Write the union of position lists in pLeft and pRight to pOut.
** "Union" in this case meaning "all unique position tuples". Should
** work with any doclist type, though both inputs and the output
** should be the same type.
*/
static void posListUnion(DLReader *pLeft, DLReader *pRight, DLWriter *pOut){
  PLReader left, right;
  PLWriter writer;

  assert( dlrDocid(pLeft)==dlrDocid(pRight) );
  assert( pLeft->iType==pRight->iType );
  assert( pLeft->iType==pOut->iType );

  plrInit(&left, pLeft);
  plrInit(&right, pRight);
  plwInit(&writer, pOut, dlrDocid(pLeft));

  while( !plrAtEnd(&left) || !plrAtEnd(&right) ){
    int c = posListCmp(&left, &right);
    if( c<0 ){
      plwCopy(&writer, &left);
      plrStep(&left);
    }else if( c>0 ){
      plwCopy(&writer, &right);
      plrStep(&right);
    }else{
      plwCopy(&writer, &left);
      plrStep(&left);
      plrStep(&right);
    }
  }

  plwTerminate(&writer);
  plwDestroy(&writer);
  plrDestroy(&left);
  plrDestroy(&right);
}

/* Write the union of doclists in pLeft and pRight to pOut. For
** docids in common between the inputs, the union of the position
** lists is written. Inputs and outputs are always type DL_DEFAULT.
*/
static void docListUnion(
  const char *pLeft, int nLeft,
  const char *pRight, int nRight,
  DataBuffer *pOut      /* Write the combined doclist here */
){
  DLReader left, right;
  DLWriter writer;

  if( nLeft==0 ){
    if( nRight!=0 ) dataBufferAppend(pOut, pRight, nRight);
    return;
  }
  if( nRight==0 ){
    dataBufferAppend(pOut, pLeft, nLeft);
    return;
  }

  dlrInit(&left, DL_DEFAULT, pLeft, nLeft);
  dlrInit(&right, DL_DEFAULT, pRight, nRight);
  dlwInit(&writer, DL_DEFAULT, pOut);

  while( !dlrAtEnd(&left) || !dlrAtEnd(&right) ){
    if( dlrAtEnd(&right) ){
      dlwCopy(&writer, &left);
      dlrStep(&left);
    }else if( dlrAtEnd(&left) ){
      dlwCopy(&writer, &right);
      dlrStep(&right);
    }else if( dlrDocid(&left)<dlrDocid(&right) ){
      dlwCopy(&writer, &left);
      dlrStep(&left);
    }else if( dlrDocid(&left)>dlrDocid(&right) ){
      dlwCopy(&writer, &right);
      dlrStep(&right);
    }else{
      posListUnion(&left, &right, &writer);
      dlrStep(&left);
      dlrStep(&right);
    }
  }

  dlrDestroy(&left);
  dlrDestroy(&right);
  dlwDestroy(&writer);
}

|
1398 /* pLeft and pRight are DLReaders positioned to the same docid. |
|
1399 ** |
|
1400 ** If there are no instances in pLeft or pRight where the position |
|
1401 ** of pLeft is one less than the position of pRight, then this |
|
1402 ** routine adds nothing to pOut. |
|
1403 ** |
|
1404 ** If there are one or more instances where positions from pLeft |
|
1405 ** are exactly one less than positions from pRight, then add a new |
|
1406 ** document record to pOut. If pOut wants to hold positions, then |
|
1407 ** include the positions from pRight that are one more than a |
|
1408 ** position in pLeft. In other words: pRight.iPos==pLeft.iPos+1. |
|
1409 */ |
|
1410 static void posListPhraseMerge(DLReader *pLeft, DLReader *pRight, |
|
1411 DLWriter *pOut){ |
|
1412 PLReader left, right; |
|
1413 PLWriter writer; |
|
1414 int match = 0; |
|
1415 |
|
1416 assert( dlrDocid(pLeft)==dlrDocid(pRight) ); |
|
1417 assert( pOut->iType!=DL_POSITIONS_OFFSETS ); |
|
1418 |
|
1419 plrInit(&left, pLeft); |
|
1420 plrInit(&right, pRight); |
|
1421 |
|
1422 while( !plrAtEnd(&left) && !plrAtEnd(&right) ){ |
|
1423 if( plrColumn(&left)<plrColumn(&right) ){ |
|
1424 plrStep(&left); |
|
1425 }else if( plrColumn(&left)>plrColumn(&right) ){ |
|
1426 plrStep(&right); |
|
1427 }else if( plrPosition(&left)+1<plrPosition(&right) ){ |
|
1428 plrStep(&left); |
|
1429 }else if( plrPosition(&left)+1>plrPosition(&right) ){ |
|
1430 plrStep(&right); |
|
1431 }else{ |
|
1432 if( !match ){ |
|
1433 plwInit(&writer, pOut, dlrDocid(pLeft)); |
|
1434 match = 1; |
|
1435 } |
|
1436 plwAdd(&writer, plrColumn(&right), plrPosition(&right), 0, 0); |
|
1437 plrStep(&left); |
|
1438 plrStep(&right); |
|
1439 } |
|
1440 } |
|
1441 |
|
1442 if( match ){ |
|
1443 plwTerminate(&writer); |
|
1444 plwDestroy(&writer); |
|
1445 } |
|
1446 |
|
1447 plrDestroy(&left); |
|
1448 plrDestroy(&right); |
|
1449 } |
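/* Worked example (illustrative): for the phrase "new york", suppose a
** document holds "new" at positions [4, 9] and "york" at positions
** [5, 12] in the same column.  Only 4+1==5 satisfies
** left.iPos+1==right.iPos, so the document is emitted with the single
** right-hand position 5.
*/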
|
1450 |
|
1451 /* We have two doclists with positions: pLeft and pRight. |
|
1452 ** Write the phrase intersection of these two doclists into pOut. |
|
1453 ** |
|
1454 ** A phrase intersection means that a document matches only if it

1455 ** contains a position where pLeft.iPos+1==pRight.iPos.
|
1456 ** |
|
1457 ** iType controls the type of data written to pOut. If iType is |
|
1458 ** DL_POSITIONS, the positions are those from pRight. |
|
1459 */ |
|
1460 static void docListPhraseMerge( |
|
1461 const char *pLeft, int nLeft, |
|
1462 const char *pRight, int nRight, |
|
1463 DocListType iType, |
|
1464 DataBuffer *pOut /* Write the combined doclist here */ |
|
1465 ){ |
|
1466 DLReader left, right; |
|
1467 DLWriter writer; |
|
1468 |
|
1469 if( nLeft==0 || nRight==0 ) return; |
|
1470 |
|
1471 assert( iType!=DL_POSITIONS_OFFSETS ); |
|
1472 |
|
1473 dlrInit(&left, DL_POSITIONS, pLeft, nLeft); |
|
1474 dlrInit(&right, DL_POSITIONS, pRight, nRight); |
|
1475 dlwInit(&writer, iType, pOut); |
|
1476 |
|
1477 while( !dlrAtEnd(&left) && !dlrAtEnd(&right) ){ |
|
1478 if( dlrDocid(&left)<dlrDocid(&right) ){ |
|
1479 dlrStep(&left); |
|
1480 }else if( dlrDocid(&right)<dlrDocid(&left) ){ |
|
1481 dlrStep(&right); |
|
1482 }else{ |
|
1483 posListPhraseMerge(&left, &right, &writer); |
|
1484 dlrStep(&left); |
|
1485 dlrStep(&right); |
|
1486 } |
|
1487 } |
|
1488 |
|
1489 dlrDestroy(&left); |
|
1490 dlrDestroy(&right); |
|
1491 dlwDestroy(&writer); |
|
1492 } |
|
1493 |
|
1494 /* We have two DL_DOCIDS doclists: pLeft and pRight. |
|
1495 ** Write the intersection of these two doclists into pOut as a |
|
1496 ** DL_DOCIDS doclist. |
|
1497 */ |
|
1498 static void docListAndMerge( |
|
1499 const char *pLeft, int nLeft, |
|
1500 const char *pRight, int nRight, |
|
1501 DataBuffer *pOut /* Write the combined doclist here */ |
|
1502 ){ |
|
1503 DLReader left, right; |
|
1504 DLWriter writer; |
|
1505 |
|
1506 if( nLeft==0 || nRight==0 ) return; |
|
1507 |
|
1508 dlrInit(&left, DL_DOCIDS, pLeft, nLeft); |
|
1509 dlrInit(&right, DL_DOCIDS, pRight, nRight); |
|
1510 dlwInit(&writer, DL_DOCIDS, pOut); |
|
1511 |
|
1512 while( !dlrAtEnd(&left) && !dlrAtEnd(&right) ){ |
|
1513 if( dlrDocid(&left)<dlrDocid(&right) ){ |
|
1514 dlrStep(&left); |
|
1515 }else if( dlrDocid(&right)<dlrDocid(&left) ){ |
|
1516 dlrStep(&right); |
|
1517 }else{ |
|
1518 dlwAdd(&writer, dlrDocid(&left)); |
|
1519 dlrStep(&left); |
|
1520 dlrStep(&right); |
|
1521 } |
|
1522 } |
|
1523 |
|
1524 dlrDestroy(&left); |
|
1525 dlrDestroy(&right); |
|
1526 dlwDestroy(&writer); |
|
1527 } |
|
1528 |
|
1529 /* We have two DL_DOCIDS doclists: pLeft and pRight. |
|
1530 ** Write the union of these two doclists into pOut as a |
|
1531 ** DL_DOCIDS doclist. |
|
1532 */ |
|
1533 static void docListOrMerge( |
|
1534 const char *pLeft, int nLeft, |
|
1535 const char *pRight, int nRight, |
|
1536 DataBuffer *pOut /* Write the combined doclist here */ |
|
1537 ){ |
|
1538 DLReader left, right; |
|
1539 DLWriter writer; |
|
1540 |
|
1541 if( nLeft==0 ){ |
|
1542 if( nRight!=0 ) dataBufferAppend(pOut, pRight, nRight); |
|
1543 return; |
|
1544 } |
|
1545 if( nRight==0 ){ |
|
1546 dataBufferAppend(pOut, pLeft, nLeft); |
|
1547 return; |
|
1548 } |
|
1549 |
|
1550 dlrInit(&left, DL_DOCIDS, pLeft, nLeft); |
|
1551 dlrInit(&right, DL_DOCIDS, pRight, nRight); |
|
1552 dlwInit(&writer, DL_DOCIDS, pOut); |
|
1553 |
|
1554 while( !dlrAtEnd(&left) || !dlrAtEnd(&right) ){ |
|
1555 if( dlrAtEnd(&right) ){ |
|
1556 dlwAdd(&writer, dlrDocid(&left)); |
|
1557 dlrStep(&left); |
|
1558 }else if( dlrAtEnd(&left) ){ |
|
1559 dlwAdd(&writer, dlrDocid(&right)); |
|
1560 dlrStep(&right); |
|
1561 }else if( dlrDocid(&left)<dlrDocid(&right) ){ |
|
1562 dlwAdd(&writer, dlrDocid(&left)); |
|
1563 dlrStep(&left); |
|
1564 }else if( dlrDocid(&right)<dlrDocid(&left) ){ |
|
1565 dlwAdd(&writer, dlrDocid(&right)); |
|
1566 dlrStep(&right); |
|
1567 }else{ |
|
1568 dlwAdd(&writer, dlrDocid(&left)); |
|
1569 dlrStep(&left); |
|
1570 dlrStep(&right); |
|
1571 } |
|
1572 } |
|
1573 |
|
1574 dlrDestroy(&left); |
|
1575 dlrDestroy(&right); |
|
1576 dlwDestroy(&writer); |
|
1577 } |
|
1578 |
|
1579 /* We have two DL_DOCIDS doclists: pLeft and pRight. |
|
1580 ** Write into pOut a DL_DOCIDS doclist containing all documents that
|
1581 ** occur in pLeft but not in pRight. |
|
1582 */ |
|
1583 static void docListExceptMerge( |
|
1584 const char *pLeft, int nLeft, |
|
1585 const char *pRight, int nRight, |
|
1586 DataBuffer *pOut /* Write the combined doclist here */ |
|
1587 ){ |
|
1588 DLReader left, right; |
|
1589 DLWriter writer; |
|
1590 |
|
1591 if( nLeft==0 ) return; |
|
1592 if( nRight==0 ){ |
|
1593 dataBufferAppend(pOut, pLeft, nLeft); |
|
1594 return; |
|
1595 } |
|
1596 |
|
1597 dlrInit(&left, DL_DOCIDS, pLeft, nLeft); |
|
1598 dlrInit(&right, DL_DOCIDS, pRight, nRight); |
|
1599 dlwInit(&writer, DL_DOCIDS, pOut); |
|
1600 |
|
1601 while( !dlrAtEnd(&left) ){ |
|
1602 while( !dlrAtEnd(&right) && dlrDocid(&right)<dlrDocid(&left) ){ |
|
1603 dlrStep(&right); |
|
1604 } |
|
1605 if( dlrAtEnd(&right) || dlrDocid(&left)<dlrDocid(&right) ){ |
|
1606 dlwAdd(&writer, dlrDocid(&left)); |
|
1607 } |
|
1608 dlrStep(&left); |
|
1609 } |
|
1610 |
|
1611 dlrDestroy(&left); |
|
1612 dlrDestroy(&right); |
|
1613 dlwDestroy(&writer); |
|
1614 } |
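/* Worked example (illustrative): with left docids [1, 3, 5] and right
** docids [2, 3]:
**
**   docListAndMerge    ==> [3]
**   docListOrMerge     ==> [1, 2, 3, 5]
**   docListExceptMerge ==> [1, 5]
*/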
|
1615 |
|
1616 static char *string_dup_n(const char *s, int n){ |
|
1617 char *str = sqlite3_malloc(n + 1); |
|
1618 memcpy(str, s, n); |
|
1619 str[n] = '\0'; |
|
1620 return str; |
|
1621 } |
|
1622 |
|
1623 /* Duplicate a string; the caller must sqlite3_free() the returned string.
|
1624 * (We don't use strdup() since it is not part of the standard C library and |
|
1625 * may not be available everywhere.) */ |
|
1626 static char *string_dup(const char *s){ |
|
1627 return string_dup_n(s, strlen(s)); |
|
1628 } |
|
1629 |
|
1630 /* Format a string, replacing each occurrence of the % character with |
|
1631 * zDb.zName. This may be more convenient than sqlite3_mprintf()
|
1632 * when one string is used repeatedly in a format string. |
|
1633 * The caller must sqlite3_free() the returned string. */
|
1634 static char *string_format(const char *zFormat, |
|
1635 const char *zDb, const char *zName){ |
|
1636 const char *p; |
|
1637 size_t len = 0; |
|
1638 size_t nDb = strlen(zDb); |
|
1639 size_t nName = strlen(zName); |
|
1640 size_t nFullTableName = nDb+1+nName; |
|
1641 char *result; |
|
1642 char *r; |
|
1643 |
|
1644 /* first compute length needed */ |
|
1645 for(p = zFormat ; *p ; ++p){ |
|
1646 len += (*p=='%' ? nFullTableName : 1); |
|
1647 } |
|
1648 len += 1; /* for null terminator */ |
|
1649 |
|
1650 r = result = sqlite3_malloc(len); |
|
1651 for(p = zFormat; *p; ++p){ |
|
1652 if( *p=='%' ){ |
|
1653 memcpy(r, zDb, nDb); |
|
1654 r += nDb; |
|
1655 *r++ = '.'; |
|
1656 memcpy(r, zName, nName); |
|
1657 r += nName; |
|
1658 } else { |
|
1659 *r++ = *p; |
|
1660 } |
|
1661 } |
|
1662 *r++ = '\0'; |
|
1663 assert( r == result + len ); |
|
1664 return result; |
|
1665 } |
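#if 0
/* Usage sketch (illustrative only; not compiled).  The database and
** table names below are hypothetical.  Each "%" in the format string
** expands to zDb.zName, so zSql here reads:
**
**   select rowid from main.email_content
*/
static void string_format_example(void){
  char *zSql = string_format("select rowid from %_content", "main", "email");
  sqlite3_free(zSql);
}
#endif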
|
1666 |
|
1667 static int sql_exec(sqlite3 *db, const char *zDb, const char *zName, |
|
1668 const char *zFormat){ |
|
1669 char *zCommand = string_format(zFormat, zDb, zName); |
|
1670 int rc; |
|
1671 TRACE(("FTS2 sql: %s\n", zCommand)); |
|
1672 rc = sqlite3_exec(db, zCommand, NULL, 0, NULL); |
|
1673 sqlite3_free(zCommand); |
|
1674 return rc; |
|
1675 } |
|
1676 |
|
1677 static int sql_prepare(sqlite3 *db, const char *zDb, const char *zName, |
|
1678 sqlite3_stmt **ppStmt, const char *zFormat){ |
|
1679 char *zCommand = string_format(zFormat, zDb, zName); |
|
1680 int rc; |
|
1681 TRACE(("FTS2 prepare: %s\n", zCommand)); |
|
1682 rc = sqlite3_prepare_v2(db, zCommand, -1, ppStmt, NULL); |
|
1683 sqlite3_free(zCommand); |
|
1684 return rc; |
|
1685 } |
|
1686 |
|
1687 /* end utility functions */ |
|
1688 |
|
1689 /* Forward reference */ |
|
1690 typedef struct fulltext_vtab fulltext_vtab; |
|
1691 |
|
1692 /* A single term in a query is represented by an instance of
|
1693 ** the following structure. |
|
1694 */ |
|
1695 typedef struct QueryTerm { |
|
1696 short int nPhrase; /* How many following terms are part of the same phrase */ |
|
1697 short int iPhrase; /* This is the i-th term of a phrase. */ |
|
1698 short int iColumn; /* Column of the index that must match this term */ |
|
1699 signed char isOr; /* this term is preceded by "OR" */ |
|
1700 signed char isNot; /* this term is preceded by "-" */ |
|
1701 signed char isPrefix; /* this term is followed by "*" */ |
|
1702 char *pTerm; /* text of the term. '\000' terminated. malloced */ |
|
1703 int nTerm; /* Number of bytes in pTerm[] */ |
|
1704 } QueryTerm; |
|
1705 |
|
1706 |
|
1707 /* A query string is parsed into a Query structure. |
|
1708 * |
|
1709 * We could, in theory, allow query strings to be complicated |
|
1710 * nested expressions with precedence determined by parentheses. |
|
1711 * But none of the major search engines do this. (Perhaps the |
|
1712 * feeling is that a parenthesized expression is too complex an

1713 * idea for the average user to grasp.)  Taking our lead from
|
1714 * the major search engines, we will allow queries to be a list |
|
1715 * of terms (with an implied AND operator) or phrases in double-quotes, |
|
1716 * with a single optional "-" before each non-phrase term to designate |
|
1717 * negation and an optional OR connector. |
|
1718 * |
|
1719 * OR binds more tightly than the implied AND, which is what the |
|
1720 * major search engines seem to do. So, for example: |
|
1721 * |
|
1722 * [one two OR three] ==> one AND (two OR three) |
|
1723 * [one OR two three] ==> (one OR two) AND three |
|
1724 * |
|
1725 * A "-" before a term matches all entries that lack that term. |
|
1726 * The "-" must occur immediately before the term with no intervening
|
1727 * space. This is how the search engines do it. |
|
1728 * |
|
1729 * A NOT term cannot be the right-hand operand of an OR. If this |
|
1730 * occurs in the query string, the NOT is ignored: |
|
1731 * |
|
1732 * [one OR -two] ==> one OR two |
|
1733 * |
|
1734 */ |
|
1735 typedef struct Query { |
|
1736 fulltext_vtab *pFts; /* The full text index */ |
|
1737 int nTerms; /* Number of terms in the query */ |
|
1738 QueryTerm *pTerms; /* Array of terms. Space obtained from malloc() */ |
|
1739 int nextIsOr; /* Set the isOr flag on the next inserted term */ |
|
1740 int nextColumn; /* Next word parsed must be in this column */ |
|
1741 int dfltColumn; /* The default column */ |
|
1742 } Query; |
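/* Parsing example (illustrative; the field values below are inferred
** from the QueryTerm comments above): the query string
**
**   [alpha "beta gamma" -delta]
**
** produces four QueryTerm entries:
**
**   pTerm="alpha"  nPhrase=0  iPhrase=0  isNot=0
**   pTerm="beta"   nPhrase=1  iPhrase=0  isNot=0   (phrase start)
**   pTerm="gamma"  nPhrase=0  iPhrase=1  isNot=0   (phrase member)
**   pTerm="delta"  nPhrase=0  iPhrase=0  isNot=1
*/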
|
1743 |
|
1744 |
|
1745 /* |
|
1746 ** An instance of the following structure keeps track of generated |
|
1747 ** matching-word offset information and snippets. |
|
1748 */ |
|
1749 typedef struct Snippet { |
|
1750 int nMatch; /* Total number of matches */ |
|
1751 int nAlloc; /* Space allocated for aMatch[] */ |
|
1752 struct snippetMatch { /* One entry for each matching term */ |
|
1753 char snStatus; /* Status flag for use while constructing snippets */ |
|
1754 short int iCol; /* The column that contains the match */ |
|
1755 short int iTerm; /* The index in Query.pTerms[] of the matching term */ |
|
1756 short int nByte; /* Number of bytes in the term */ |
|
1757 int iStart; /* The offset to the first character of the term */ |
|
1758 } *aMatch; /* Points to space obtained from malloc */ |
|
1759 char *zOffset; /* Text rendering of aMatch[] */ |
|
1760 int nOffset; /* strlen(zOffset) */ |
|
1761 char *zSnippet; /* Snippet text */ |
|
1762 int nSnippet; /* strlen(zSnippet) */ |
|
1763 } Snippet; |
|
1764 |
|
1765 |
|
1766 typedef enum QueryType { |
|
1767 QUERY_GENERIC, /* table scan */ |
|
1768 QUERY_ROWID, /* lookup by rowid */ |
|
1769 QUERY_FULLTEXT /* QUERY_FULLTEXT + [i] is a full-text search for column i */
|
1770 } QueryType; |
|
1771 |
|
1772 typedef enum fulltext_statement { |
|
1773 CONTENT_INSERT_STMT, |
|
1774 CONTENT_SELECT_STMT, |
|
1775 CONTENT_UPDATE_STMT, |
|
1776 CONTENT_DELETE_STMT, |
|
1777 CONTENT_EXISTS_STMT, |
|
1778 |
|
1779 BLOCK_INSERT_STMT, |
|
1780 BLOCK_SELECT_STMT, |
|
1781 BLOCK_DELETE_STMT, |
|
1782 BLOCK_DELETE_ALL_STMT, |
|
1783 |
|
1784 SEGDIR_MAX_INDEX_STMT, |
|
1785 SEGDIR_SET_STMT, |
|
1786 SEGDIR_SELECT_LEVEL_STMT, |
|
1787 SEGDIR_SPAN_STMT, |
|
1788 SEGDIR_DELETE_STMT, |
|
1789 SEGDIR_SELECT_SEGMENT_STMT, |
|
1790 SEGDIR_SELECT_ALL_STMT, |
|
1791 SEGDIR_DELETE_ALL_STMT, |
|
1792 SEGDIR_COUNT_STMT, |
|
1793 |
|
1794 MAX_STMT /* Always at end! */ |
|
1795 } fulltext_statement; |
|
1796 |
|
1797 /* These must exactly match the enum above. */ |
|
1798 /* TODO(shess): Is there some risk that a statement will be used in two |
|
1799 ** cursors at once, e.g. if a query joins a virtual table to itself? |
|
1800 ** If so perhaps we should move some of these to the cursor object. |
|
1801 */ |
|
1802 static const char *const fulltext_zStatement[MAX_STMT] = { |
|
1803 /* CONTENT_INSERT */ NULL, /* generated in contentInsertStatement() */ |
|
1804 /* CONTENT_SELECT */ "select * from %_content where rowid = ?", |
|
1805 /* CONTENT_UPDATE */ NULL, /* generated in contentUpdateStatement() */ |
|
1806 /* CONTENT_DELETE */ "delete from %_content where rowid = ?", |
|
1807 /* CONTENT_EXISTS */ "select rowid from %_content limit 1", |
|
1808 |
|
1809 /* BLOCK_INSERT */ "insert into %_segments values (?)", |
|
1810 /* BLOCK_SELECT */ "select block from %_segments where rowid = ?", |
|
1811 /* BLOCK_DELETE */ "delete from %_segments where rowid between ? and ?", |
|
1812 /* BLOCK_DELETE_ALL */ "delete from %_segments", |
|
1813 |
|
1814 /* SEGDIR_MAX_INDEX */ "select max(idx) from %_segdir where level = ?", |
|
1815 /* SEGDIR_SET */ "insert into %_segdir values (?, ?, ?, ?, ?, ?)", |
|
1816 /* SEGDIR_SELECT_LEVEL */ |
|
1817 "select start_block, leaves_end_block, root from %_segdir " |
|
1818 " where level = ? order by idx", |
|
1819 /* SEGDIR_SPAN */ |
|
1820 "select min(start_block), max(end_block) from %_segdir " |
|
1821 " where level = ? and start_block <> 0", |
|
1822 /* SEGDIR_DELETE */ "delete from %_segdir where level = ?", |
|
1823 |
|
1824 /* NOTE(shess): The first three results of the following two |
|
1825 ** statements must match. |
|
1826 */ |
|
1827 /* SEGDIR_SELECT_SEGMENT */ |
|
1828 "select start_block, leaves_end_block, root from %_segdir " |
|
1829 " where level = ? and idx = ?", |
|
1830 /* SEGDIR_SELECT_ALL */ |
|
1831 "select start_block, leaves_end_block, root from %_segdir " |
|
1832 " order by level desc, idx asc", |
|
1833 /* SEGDIR_DELETE_ALL */ "delete from %_segdir", |
|
1834 /* SEGDIR_COUNT */ "select count(*), ifnull(max(level),0) from %_segdir", |
|
1835 }; |
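/* When prepared, each "%" above is expanded by string_format().  For a
** hypothetical table "email" in database "main", BLOCK_SELECT becomes:
**
**   select block from main.email_segments where rowid = ?
*/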
|
1836 |
|
1837 /* |
|
1838 ** A connection to a fulltext index is an instance of the following |
|
1839 ** structure. The xCreate and xConnect methods create an instance |
|
1840 ** of this structure and xDestroy and xDisconnect free that instance. |
|
1841 ** All other methods receive a pointer to the structure as one of their |
|
1842 ** arguments. |
|
1843 */ |
|
1844 struct fulltext_vtab { |
|
1845 sqlite3_vtab base; /* Base class used by SQLite core */ |
|
1846 sqlite3 *db; /* The database connection */ |
|
1847 const char *zDb; /* logical database name */ |
|
1848 const char *zName; /* virtual table name */ |
|
1849 int nColumn; /* number of columns in virtual table */ |
|
1850 char **azColumn; /* column names. malloced */ |
|
1851 char **azContentColumn; /* column names in content table; malloced */ |
|
1852 sqlite3_tokenizer *pTokenizer; /* tokenizer for inserts and queries */ |
|
1853 |
|
1854 /* Precompiled statements which we keep as long as the table is |
|
1855 ** open. |
|
1856 */ |
|
1857 sqlite3_stmt *pFulltextStatements[MAX_STMT]; |
|
1858 |
|
1859 /* Precompiled statements used for segment merges. We run a |
|
1860 ** separate select across the leaf level of each tree being merged. |
|
1861 */ |
|
1862 sqlite3_stmt *pLeafSelectStmts[MERGE_COUNT]; |
|
1863 /* The statement used to prepare pLeafSelectStmts. */ |
|
1864 #define LEAF_SELECT \ |
|
1865 "select block from %_segments where rowid between ? and ? order by rowid" |
|
1866 |
|
1867 /* These buffer pending index updates during transactions. |
|
1868 ** nPendingData estimates the memory size of the pending data. It |
|
1869 ** doesn't include the hash-bucket overhead, nor any malloc |
|
1870 ** overhead. When nPendingData exceeds kPendingThreshold, the |
|
1871 ** buffer is flushed even before the transaction closes. |
|
1872 ** pendingTerms stores the data, and is only valid when nPendingData |
|
1873 ** is >=0 (nPendingData<0 means pendingTerms has not been |
|
1874 ** initialized). iPrevDocid is the last docid written, used to make |
|
1875 ** certain we're inserting in sorted order. |
|
1876 */ |
|
1877 int nPendingData; |
|
1878 #define kPendingThreshold (1*1024*1024) |
|
1879 sqlite_int64 iPrevDocid; |
|
1880 fts2Hash pendingTerms; |
|
1881 }; |
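/* Flush-condition sketch (illustrative; the helper name below is
** assumed here, since the real flush routine appears much further down
** this file):
**
**   if( v->nPendingData>kPendingThreshold ){
**     rc = flushPendingTerms(v);
**   }
*/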
|
1882 |
|
1883 /* |
|
1884 ** When the core wants to do a query, it creates a cursor using a
|
1885 ** call to xOpen. This structure is an instance of a cursor. It |
|
1886 ** is destroyed by xClose. |
|
1887 */ |
|
1888 typedef struct fulltext_cursor { |
|
1889 sqlite3_vtab_cursor base; /* Base class used by SQLite core */ |
|
1890 QueryType iCursorType; /* Copy of sqlite3_index_info.idxNum */ |
|
1891 sqlite3_stmt *pStmt; /* Prepared statement in use by the cursor */ |
|
1892 int eof; /* True if at End Of Results */ |
|
1893 Query q; /* Parsed query string */ |
|
1894 Snippet snippet; /* Cached snippet for the current row */ |
|
1895 int iColumn; /* Column being searched */ |
|
1896 DataBuffer result; /* Doclist results from fulltextQuery */ |
|
1897 DLReader reader; /* Result reader if result not empty */ |
|
1898 } fulltext_cursor; |
|
1899 |
|
1900 static struct fulltext_vtab *cursor_vtab(fulltext_cursor *c){ |
|
1901 return (fulltext_vtab *) c->base.pVtab; |
|
1902 } |
|
1903 |
|
1904 static const sqlite3_module fts2Module; /* forward declaration */ |
|
1905 |
|
1906 /* Return a dynamically generated statement of the form |
|
1907 * insert into %_content (rowid, ...) values (?, ...) |
|
1908 */ |
|
1909 static const char *contentInsertStatement(fulltext_vtab *v){ |
|
1910 StringBuffer sb; |
|
1911 int i; |
|
1912 |
|
1913 initStringBuffer(&sb); |
|
1914 append(&sb, "insert into %_content (rowid, "); |
|
1915 appendList(&sb, v->nColumn, v->azContentColumn); |
|
1916 append(&sb, ") values (?"); |
|
1917 for(i=0; i<v->nColumn; ++i) |
|
1918 append(&sb, ", ?"); |
|
1919 append(&sb, ")"); |
|
1920 return stringBufferData(&sb); |
|
1921 } |
|
1922 |
|
1923 /* Return a dynamically generated statement of the form |
|
1924 * update %_content set [col_0] = ?, [col_1] = ?, ... |
|
1925 * where rowid = ? |
|
1926 */ |
|
1927 static const char *contentUpdateStatement(fulltext_vtab *v){ |
|
1928 StringBuffer sb; |
|
1929 int i; |
|
1930 |
|
1931 initStringBuffer(&sb); |
|
1932 append(&sb, "update %_content set "); |
|
1933 for(i=0; i<v->nColumn; ++i) { |
|
1934 if( i>0 ){ |
|
1935 append(&sb, ", "); |
|
1936 } |
|
1937 append(&sb, v->azContentColumn[i]); |
|
1938 append(&sb, " = ?"); |
|
1939 } |
|
1940 append(&sb, " where rowid = ?"); |
|
1941 return stringBufferData(&sb); |
|
1942 } |
|
1943 |
|
1944 /* Puts a freshly-prepared statement determined by iStmt in *ppStmt. |
|
1945 ** If the indicated statement has never been prepared, it is prepared |
|
1946 ** and cached, otherwise the cached version is reset. |
|
1947 */ |
|
1948 static int sql_get_statement(fulltext_vtab *v, fulltext_statement iStmt, |
|
1949 sqlite3_stmt **ppStmt){ |
|
1950 assert( iStmt<MAX_STMT ); |
|
1951 if( v->pFulltextStatements[iStmt]==NULL ){ |
|
1952 const char *zStmt; |
|
1953 int rc; |
|
1954 switch( iStmt ){ |
|
1955 case CONTENT_INSERT_STMT: |
|
1956 zStmt = contentInsertStatement(v); break; |
|
1957 case CONTENT_UPDATE_STMT: |
|
1958 zStmt = contentUpdateStatement(v); break; |
|
1959 default: |
|
1960 zStmt = fulltext_zStatement[iStmt]; |
|
1961 } |
|
1962 rc = sql_prepare(v->db, v->zDb, v->zName, &v->pFulltextStatements[iStmt], |
|
1963 zStmt); |
|
1964 if( zStmt != fulltext_zStatement[iStmt]) sqlite3_free((void *) zStmt); |
|
1965 if( rc!=SQLITE_OK ) return rc; |
|
1966 } else { |
|
1967 int rc = sqlite3_reset(v->pFulltextStatements[iStmt]); |
|
1968 if( rc!=SQLITE_OK ) return rc; |
|
1969 } |
|
1970 |
|
1971 *ppStmt = v->pFulltextStatements[iStmt]; |
|
1972 return SQLITE_OK; |
|
1973 } |
|
1974 |
|
1975 /* Like sqlite3_step(), but convert SQLITE_DONE to SQLITE_OK.  Any other

1976 ** result code, including an unexpected SQLITE_ROW, is returned unchanged.

1977 ** Useful for statements like UPDATE, where we expect no results.
|
1978 */ |
|
1979 static int sql_single_step(sqlite3_stmt *s){ |
|
1980 int rc = sqlite3_step(s); |
|
1981 return (rc==SQLITE_DONE) ? SQLITE_OK : rc; |
|
1982 } |
|
1983 |
|
1984 /* Like sql_get_statement(), but for special replicated LEAF_SELECT |
|
1985 ** statements. idx -1 is a special case for an uncached version of |
|
1986 ** the statement (used in the optimize implementation). |
|
1987 */ |
|
1988 /* TODO(shess) Write version for generic statements and then share |
|
1989 ** that between the cached-statement functions. |
|
1990 */ |
|
1991 static int sql_get_leaf_statement(fulltext_vtab *v, int idx, |
|
1992 sqlite3_stmt **ppStmt){ |
|
1993 assert( idx>=-1 && idx<MERGE_COUNT ); |
|
1994 if( idx==-1 ){ |
|
1995 return sql_prepare(v->db, v->zDb, v->zName, ppStmt, LEAF_SELECT); |
|
1996 }else if( v->pLeafSelectStmts[idx]==NULL ){ |
|
1997 int rc = sql_prepare(v->db, v->zDb, v->zName, &v->pLeafSelectStmts[idx], |
|
1998 LEAF_SELECT); |
|
1999 if( rc!=SQLITE_OK ) return rc; |
|
2000 }else{ |
|
2001 int rc = sqlite3_reset(v->pLeafSelectStmts[idx]); |
|
2002 if( rc!=SQLITE_OK ) return rc; |
|
2003 } |
|
2004 |
|
2005 *ppStmt = v->pLeafSelectStmts[idx]; |
|
2006 return SQLITE_OK; |
|
2007 } |
|
2008 |
|
2009 /* insert into %_content (rowid, ...) values ([rowid], [pValues]) */ |
|
2010 static int content_insert(fulltext_vtab *v, sqlite3_value *rowid, |
|
2011 sqlite3_value **pValues){ |
|
2012 sqlite3_stmt *s; |
|
2013 int i; |
|
2014 int rc = sql_get_statement(v, CONTENT_INSERT_STMT, &s); |
|
2015 if( rc!=SQLITE_OK ) return rc; |
|
2016 |
|
2017 rc = sqlite3_bind_value(s, 1, rowid); |
|
2018 if( rc!=SQLITE_OK ) return rc; |
|
2019 |
|
2020 for(i=0; i<v->nColumn; ++i){ |
|
2021 rc = sqlite3_bind_value(s, 2+i, pValues[i]); |
|
2022 if( rc!=SQLITE_OK ) return rc; |
|
2023 } |
|
2024 |
|
2025 return sql_single_step(s); |
|
2026 } |
|
2027 |
|
2028 /* update %_content set col0 = pValues[0], col1 = pValues[1], ... |
|
2029 * where rowid = [iRowid] */ |
|
2030 static int content_update(fulltext_vtab *v, sqlite3_value **pValues, |
|
2031 sqlite_int64 iRowid){ |
|
2032 sqlite3_stmt *s; |
|
2033 int i; |
|
2034 int rc = sql_get_statement(v, CONTENT_UPDATE_STMT, &s); |
|
2035 if( rc!=SQLITE_OK ) return rc; |
|
2036 |
|
2037 for(i=0; i<v->nColumn; ++i){ |
|
2038 rc = sqlite3_bind_value(s, 1+i, pValues[i]); |
|
2039 if( rc!=SQLITE_OK ) return rc; |
|
2040 } |
|
2041 |
|
2042 rc = sqlite3_bind_int64(s, 1+v->nColumn, iRowid); |
|
2043 if( rc!=SQLITE_OK ) return rc; |
|
2044 |
|
2045 return sql_single_step(s); |
|
2046 } |
|
2047 |
|
2048 static void freeStringArray(int nString, const char **pString){ |
|
2049 int i; |
|
2050 |
|
2051 for (i=0 ; i < nString ; ++i) { |
|
2052 if( pString[i]!=NULL ) sqlite3_free((void *) pString[i]); |
|
2053 } |
|
2054 sqlite3_free((void *) pString); |
|
2055 } |
|
2056 |
|
2057 /* select * from %_content where rowid = [iRow] |
|
2058 * The caller must free the returned array and all strings in it.
|
2059 * null fields will be NULL in the returned array. |
|
2060 * |
|
2061 * TODO: Perhaps we should return pointer/length strings here for consistency |
|
2062 * with other code which uses pointer/length. */ |
|
2063 static int content_select(fulltext_vtab *v, sqlite_int64 iRow, |
|
2064 const char ***pValues){ |
|
2065 sqlite3_stmt *s; |
|
2066 const char **values; |
|
2067 int i; |
|
2068 int rc; |
|
2069 |
|
2070 *pValues = NULL; |
|
2071 |
|
2072 rc = sql_get_statement(v, CONTENT_SELECT_STMT, &s); |
|
2073 if( rc!=SQLITE_OK ) return rc; |
|
2074 |
|
2075 rc = sqlite3_bind_int64(s, 1, iRow); |
|
2076 if( rc!=SQLITE_OK ) return rc; |
|
2077 |
|
2078 rc = sqlite3_step(s); |
|
2079 if( rc!=SQLITE_ROW ) return rc; |
|
2080 |
|
2081 values = (const char **) sqlite3_malloc(v->nColumn * sizeof(const char *)); |
|
2082 for(i=0; i<v->nColumn; ++i){ |
|
2083 if( sqlite3_column_type(s, i)==SQLITE_NULL ){ |
|
2084 values[i] = NULL; |
|
2085 }else{ |
|
2086 values[i] = string_dup((char*)sqlite3_column_text(s, i)); |
|
2087 } |
|
2088 } |
|
2089 |
|
2090 /* We expect only one row. We must execute another sqlite3_step() |
|
2091 * to complete the iteration; otherwise the table will remain locked. */ |
|
2092 rc = sqlite3_step(s); |
|
2093 if( rc==SQLITE_DONE ){ |
|
2094 *pValues = values; |
|
2095 return SQLITE_OK; |
|
2096 } |
|
2097 |
|
2098 freeStringArray(v->nColumn, values); |
|
2099 return rc; |
|
2100 } |
|
2101 |
|
2102 /* delete from %_content where rowid = [iRow] */
|
2103 static int content_delete(fulltext_vtab *v, sqlite_int64 iRow){ |
|
2104 sqlite3_stmt *s; |
|
2105 int rc = sql_get_statement(v, CONTENT_DELETE_STMT, &s); |
|
2106 if( rc!=SQLITE_OK ) return rc; |
|
2107 |
|
2108 rc = sqlite3_bind_int64(s, 1, iRow); |
|
2109 if( rc!=SQLITE_OK ) return rc; |
|
2110 |
|
2111 return sql_single_step(s); |
|
2112 } |
|
2113 |
|
2114 /* Returns SQLITE_ROW if any rows exist in %_content, SQLITE_DONE if |
|
2115 ** no rows exist, or an error code on failure.
|
2116 */ |
|
2117 static int content_exists(fulltext_vtab *v){ |
|
2118 sqlite3_stmt *s; |
|
2119 int rc = sql_get_statement(v, CONTENT_EXISTS_STMT, &s); |
|
2120 if( rc!=SQLITE_OK ) return rc; |
|
2121 |
|
2122 rc = sqlite3_step(s); |
|
2123 if( rc!=SQLITE_ROW ) return rc; |
|
2124 |
|
2125 /* We expect only one row. We must execute another sqlite3_step() |
|
2126 * to complete the iteration; otherwise the table will remain locked. */ |
|
2127 rc = sqlite3_step(s); |
|
2128 if( rc==SQLITE_DONE ) return SQLITE_ROW; |
|
2129 if( rc==SQLITE_ROW ) return SQLITE_ERROR; |
|
2130 return rc; |
|
2131 } |
|
2132 |
|
2133 /* insert into %_segments values ([pData]) |
|
2134 ** returns assigned rowid in *piBlockid |
|
2135 */ |
|
2136 static int block_insert(fulltext_vtab *v, const char *pData, int nData, |
|
2137 sqlite_int64 *piBlockid){ |
|
2138 sqlite3_stmt *s; |
|
2139 int rc = sql_get_statement(v, BLOCK_INSERT_STMT, &s); |
|
2140 if( rc!=SQLITE_OK ) return rc; |
|
2141 |
|
2142 rc = sqlite3_bind_blob(s, 1, pData, nData, SQLITE_STATIC); |
|
2143 if( rc!=SQLITE_OK ) return rc; |
|
2144 |
|
2145 rc = sqlite3_step(s); |
|
2146 if( rc==SQLITE_ROW ) return SQLITE_ERROR; |
|
2147 if( rc!=SQLITE_DONE ) return rc; |
|
2148 |
|
2149 *piBlockid = sqlite3_last_insert_rowid(v->db); |
|
2150 return SQLITE_OK; |
|
2151 } |
|
2152 |
|
2153 /* delete from %_segments |
|
2154 ** where rowid between [iStartBlockid] and [iEndBlockid] |
|
2155 ** |
|
2156 ** Deletes the given range of blocks, inclusive.  This is used to

2157 ** delete the blocks which form a segment.
|
2158 */ |
|
2159 static int block_delete(fulltext_vtab *v, |
|
2160 sqlite_int64 iStartBlockid, sqlite_int64 iEndBlockid){ |
|
2161 sqlite3_stmt *s; |
|
2162 int rc = sql_get_statement(v, BLOCK_DELETE_STMT, &s); |
|
2163 if( rc!=SQLITE_OK ) return rc; |
|
2164 |
|
2165 rc = sqlite3_bind_int64(s, 1, iStartBlockid); |
|
2166 if( rc!=SQLITE_OK ) return rc; |
|
2167 |
|
2168 rc = sqlite3_bind_int64(s, 2, iEndBlockid); |
|
2169 if( rc!=SQLITE_OK ) return rc; |
|
2170 |
|
2171 return sql_single_step(s); |
|
2172 } |
|
2173 |
|
2174 /* Returns SQLITE_ROW with *pidx set to the maximum segment idx found |
|
2175 ** at iLevel. Returns SQLITE_DONE if there are no segments at |
|
2176 ** iLevel. Otherwise returns an error. |
|
2177 */ |
|
2178 static int segdir_max_index(fulltext_vtab *v, int iLevel, int *pidx){ |
|
2179 sqlite3_stmt *s; |
|
2180 int rc = sql_get_statement(v, SEGDIR_MAX_INDEX_STMT, &s); |
|
2181 if( rc!=SQLITE_OK ) return rc; |
|
2182 |
|
2183 rc = sqlite3_bind_int(s, 1, iLevel); |
|
2184 if( rc!=SQLITE_OK ) return rc; |
|
2185 |
|
2186 rc = sqlite3_step(s); |
|
2187 /* Should always get at least one row due to how max() works. */ |
|
2188 if( rc==SQLITE_DONE ) return SQLITE_DONE; |
|
2189 if( rc!=SQLITE_ROW ) return rc; |
|
2190 |
|
2191 /* NULL means that there were no inputs to max(). */ |
|
2192 if( SQLITE_NULL==sqlite3_column_type(s, 0) ){ |
|
2193 rc = sqlite3_step(s); |
|
2194 if( rc==SQLITE_ROW ) return SQLITE_ERROR; |
|
2195 return rc; |
|
2196 } |
|
2197 |
|
2198 *pidx = sqlite3_column_int(s, 0); |
|
2199 |
|
2200 /* We expect only one row. We must execute another sqlite3_step() |
|
2201 * to complete the iteration; otherwise the table will remain locked. */ |
|
2202 rc = sqlite3_step(s); |
|
2203 if( rc==SQLITE_ROW ) return SQLITE_ERROR; |
|
2204 if( rc!=SQLITE_DONE ) return rc; |
|
2205 return SQLITE_ROW; |
|
2206 } |
|
2207 |
|
2208 /* insert into %_segdir values ( |
|
2209 ** [iLevel], [idx], |
|
2210 ** [iStartBlockid], [iLeavesEndBlockid], [iEndBlockid], |
|
2211 ** [pRootData] |
|
2212 ** ) |
|
2213 */ |
|
2214 static int segdir_set(fulltext_vtab *v, int iLevel, int idx, |
|
2215 sqlite_int64 iStartBlockid, |
|
2216 sqlite_int64 iLeavesEndBlockid, |
|
2217 sqlite_int64 iEndBlockid, |
|
2218 const char *pRootData, int nRootData){ |
|
2219 sqlite3_stmt *s; |
|
2220 int rc = sql_get_statement(v, SEGDIR_SET_STMT, &s); |
|
2221 if( rc!=SQLITE_OK ) return rc; |
|
2222 |
|
2223 rc = sqlite3_bind_int(s, 1, iLevel); |
|
2224 if( rc!=SQLITE_OK ) return rc; |
|
2225 |
|
2226 rc = sqlite3_bind_int(s, 2, idx); |
|
2227 if( rc!=SQLITE_OK ) return rc; |
|
2228 |
|
2229 rc = sqlite3_bind_int64(s, 3, iStartBlockid); |
|
2230 if( rc!=SQLITE_OK ) return rc; |
|
2231 |
|
2232 rc = sqlite3_bind_int64(s, 4, iLeavesEndBlockid); |
|
2233 if( rc!=SQLITE_OK ) return rc; |
|
2234 |
|
2235 rc = sqlite3_bind_int64(s, 5, iEndBlockid); |
|
2236 if( rc!=SQLITE_OK ) return rc; |
|
2237 |
|
2238 rc = sqlite3_bind_blob(s, 6, pRootData, nRootData, SQLITE_STATIC); |
|
2239 if( rc!=SQLITE_OK ) return rc; |
|
2240 |
|
2241 return sql_single_step(s); |
|
2242 } |
|
2243 |
|
2244 /* Queries %_segdir for the block span of the segments in level |
|
2245 ** iLevel. Returns SQLITE_DONE if there are no blocks for iLevel, |
|
2246 ** SQLITE_ROW if there are blocks, else an error. |
|
2247 */ |
|
2248 static int segdir_span(fulltext_vtab *v, int iLevel, |
|
2249 sqlite_int64 *piStartBlockid, |
|
2250 sqlite_int64 *piEndBlockid){ |
|
2251 sqlite3_stmt *s; |
|
2252 int rc = sql_get_statement(v, SEGDIR_SPAN_STMT, &s); |
|
2253 if( rc!=SQLITE_OK ) return rc; |
|
2254 |
|
2255 rc = sqlite3_bind_int(s, 1, iLevel); |
|
2256 if( rc!=SQLITE_OK ) return rc; |
|
2257 |
|
2258 rc = sqlite3_step(s); |
|
2259 if( rc==SQLITE_DONE ) return SQLITE_DONE; /* Should never happen */ |
|
2260 if( rc!=SQLITE_ROW ) return rc; |
|
2261 |
|
2262 /* This happens if all segments at this level are entirely inline. */ |
|
2263 if( SQLITE_NULL==sqlite3_column_type(s, 0) ){ |
|
2264 /* We expect only one row. We must execute another sqlite3_step() |
|
2265 * to complete the iteration; otherwise the table will remain locked. */ |
|
2266 int rc2 = sqlite3_step(s); |
|
2267 if( rc2==SQLITE_ROW ) return SQLITE_ERROR; |
|
2268 return rc2; |
|
2269 } |
|
2270 |
|
2271 *piStartBlockid = sqlite3_column_int64(s, 0); |
|
2272 *piEndBlockid = sqlite3_column_int64(s, 1); |
|
2273 |
|
2274 /* We expect only one row. We must execute another sqlite3_step() |
|
2275 * to complete the iteration; otherwise the table will remain locked. */ |
|
2276 rc = sqlite3_step(s); |
|
2277 if( rc==SQLITE_ROW ) return SQLITE_ERROR; |
|
2278 if( rc!=SQLITE_DONE ) return rc; |
|
2279 return SQLITE_ROW; |
|
2280 } |
|
2281 |
|
2282 /* Delete the segment blocks and segment directory records for all |
|
2283 ** segments at iLevel. |
|
2284 */ |
|
2285 static int segdir_delete(fulltext_vtab *v, int iLevel){ |
|
2286 sqlite3_stmt *s; |
|
2287 sqlite_int64 iStartBlockid, iEndBlockid; |
|
2288 int rc = segdir_span(v, iLevel, &iStartBlockid, &iEndBlockid); |
|
2289 if( rc!=SQLITE_ROW && rc!=SQLITE_DONE ) return rc; |
|
2290 |
|
2291 if( rc==SQLITE_ROW ){ |
|
2292 rc = block_delete(v, iStartBlockid, iEndBlockid); |
|
2293 if( rc!=SQLITE_OK ) return rc; |
|
2294 } |
|
2295 |
|
2296 /* Delete the segment directory itself. */ |
|
2297 rc = sql_get_statement(v, SEGDIR_DELETE_STMT, &s); |
|
2298 if( rc!=SQLITE_OK ) return rc; |
|
2299 |
|
2300 rc = sqlite3_bind_int64(s, 1, iLevel); |
|
2301 if( rc!=SQLITE_OK ) return rc; |
|
2302 |
|
2303 return sql_single_step(s); |
|
2304 } |
|
2305 |
|
2306 /* Delete the entire fts index.  Returns SQLITE_OK on success, or the

2307 ** relevant error code on failure.
|
2308 */ |
|
2309 static int segdir_delete_all(fulltext_vtab *v){ |
|
2310 sqlite3_stmt *s; |
|
2311 int rc = sql_get_statement(v, SEGDIR_DELETE_ALL_STMT, &s); |
|
2312 if( rc!=SQLITE_OK ) return rc; |
|
2313 |
|
2314 rc = sql_single_step(s); |
|
2315 if( rc!=SQLITE_OK ) return rc; |
|
2316 |
|
2317 rc = sql_get_statement(v, BLOCK_DELETE_ALL_STMT, &s); |
|
2318 if( rc!=SQLITE_OK ) return rc; |
|
2319 |
|
2320 return sql_single_step(s); |
|
2321 } |
|
2322 |
|
2323 /* Returns SQLITE_OK with *pnSegments set to the number of entries in |
|
2324 ** %_segdir and *piMaxLevel set to the highest level which has a |
|
2325 ** segment. Otherwise returns the SQLite error which caused failure. |
|
2326 */ |
|
2327 static int segdir_count(fulltext_vtab *v, int *pnSegments, int *piMaxLevel){ |
|
2328 sqlite3_stmt *s; |
|
2329 int rc = sql_get_statement(v, SEGDIR_COUNT_STMT, &s); |
|
2330 if( rc!=SQLITE_OK ) return rc; |
|
2331 |
|
2332 rc = sqlite3_step(s); |
|
2333 /* TODO(shess): This case should not be possible? Should stronger |
|
2334 ** measures be taken if it happens? |
|
2335 */ |
|
2336 if( rc==SQLITE_DONE ){ |
|
2337 *pnSegments = 0; |
|
2338 *piMaxLevel = 0; |
|
2339 return SQLITE_OK; |
|
2340 } |
|
2341 if( rc!=SQLITE_ROW ) return rc; |
|
2342 |
|
2343 *pnSegments = sqlite3_column_int(s, 0); |
|
2344 *piMaxLevel = sqlite3_column_int(s, 1); |
|
2345 |
|
2346 /* We expect only one row. We must execute another sqlite3_step() |
|
2347 * to complete the iteration; otherwise the table will remain locked. */ |
|
2348 rc = sqlite3_step(s); |
|
2349 if( rc==SQLITE_DONE ) return SQLITE_OK; |
|
2350 if( rc==SQLITE_ROW ) return SQLITE_ERROR; |
|
2351 return rc; |
|
2352 } |
|
2353 |
|
2354 /* TODO(shess) clearPendingTerms() is far down the file because |
|
2355 ** writeZeroSegment() is far down the file because LeafWriter is far |
|
2356 ** down the file. Consider refactoring the code to move the non-vtab |
|
2357 ** code above the vtab code so that we don't need this forward |
|
2358 ** reference. |
|
2359 */ |
|
2360 static int clearPendingTerms(fulltext_vtab *v); |
|
2361 |
|
2362 /* |
|
2363 ** Free the memory used to contain a fulltext_vtab structure. |
|
2364 */ |
|
2365 static void fulltext_vtab_destroy(fulltext_vtab *v){ |
|
2366 int iStmt, i; |
|
2367 |
|
2368 TRACE(("FTS2 Destroy %p\n", v)); |
|
2369 for( iStmt=0; iStmt<MAX_STMT; iStmt++ ){ |
|
2370 if( v->pFulltextStatements[iStmt]!=NULL ){ |
|
2371 sqlite3_finalize(v->pFulltextStatements[iStmt]); |
|
2372 v->pFulltextStatements[iStmt] = NULL; |
|
2373 } |
|
2374 } |
|
2375 |
|
2376 for( i=0; i<MERGE_COUNT; i++ ){ |
|
2377 if( v->pLeafSelectStmts[i]!=NULL ){ |
|
2378 sqlite3_finalize(v->pLeafSelectStmts[i]); |
|
2379 v->pLeafSelectStmts[i] = NULL; |
|
2380 } |
|
2381 } |
|
2382 |
|
2383 if( v->pTokenizer!=NULL ){ |
|
2384 v->pTokenizer->pModule->xDestroy(v->pTokenizer); |
|
2385 v->pTokenizer = NULL; |
|
2386 } |
|
2387 |
|
2388 clearPendingTerms(v); |
|
2389 |
|
2390 sqlite3_free(v->azColumn); |
|
2391 for(i = 0; i < v->nColumn; ++i) { |
|
2392 sqlite3_free(v->azContentColumn[i]); |
|
2393 } |
|
2394 sqlite3_free(v->azContentColumn); |
|
2395 sqlite3_free(v); |
|
2396 } |
|
2397 |
|
2398 /* |
|
2399 ** Token types for parsing the arguments to xConnect or xCreate. |
|
2400 */ |
|
2401 #define TOKEN_EOF 0 /* End of file */ |
|
2402 #define TOKEN_SPACE 1 /* Any kind of whitespace */ |
|
2403 #define TOKEN_ID 2 /* An identifier */ |
|
2404 #define TOKEN_STRING 3 /* A string literal */ |
|
2405 #define TOKEN_PUNCT 4 /* A single punctuation character */ |
|
2406 |
|
2407 /* |
|
2408 ** If X is a character that can be used in an identifier then |
|
2409 ** IdChar(X) will be true. Otherwise it is false. |
|
2410 ** |
|
2411 ** For ASCII, any character with the high-order bit set is |
|
2412 ** allowed in an identifier. For 7-bit characters, |
|
2413 ** the isIdChar[] table below must contain a 1.
|
2414 ** |
|
2415 ** Ticket #1066.  The SQL standard does not allow '$' in the

2416 ** middle of identifiers.  But many SQL implementations do.
|
2417 ** SQLite will allow '$' in identifiers for compatibility. |
|
2418 ** But the feature is undocumented. |
|
2419 */ |
|
2420 static const char isIdChar[] = { |
|
2421 /* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE xF */ |
|
2422 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2x */ |
|
2423 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 3x */ |
|
2424 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 4x */ |
|
2425 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, /* 5x */ |
|
2426 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6x */ |
|
2427 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* 7x */ |
|
2428 }; |
|
2429 #define IdChar(C) (((c=C)&0x80)!=0 || (c>0x1f && isIdChar[c-0x20])) |
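/* For example: IdChar('a'), IdChar('0'), IdChar('_'), and IdChar('$')
** are all true, while IdChar(' ') and IdChar('-') are false.  Any byte
** with the high bit set (e.g. part of a multi-byte UTF-8 character) is
** also accepted.
*/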
|
2430 |
|
2431 |
|
2432 /* |
|
2433 ** Return the length of the token that begins at z[0]. |
|
2434 ** Store the token type in *tokenType before returning. |
|
2435 */ |
|
2436 static int getToken(const char *z, int *tokenType){ |
|
2437 int i, c; |
|
2438 switch( *z ){ |
|
2439 case 0: { |
|
2440 *tokenType = TOKEN_EOF; |
|
2441 return 0; |
|
2442 } |
|
2443 case ' ': case '\t': case '\n': case '\f': case '\r': { |
|
2444 for(i=1; safe_isspace(z[i]); i++){} |
|
2445 *tokenType = TOKEN_SPACE; |
|
2446 return i; |
|
2447 } |
|
2448 case '`': |
|
2449 case '\'': |
|
2450 case '"': { |
|
2451 int delim = z[0]; |
|
2452 for(i=1; (c=z[i])!=0; i++){ |
|
2453 if( c==delim ){ |
|
2454 if( z[i+1]==delim ){ |
|
2455 i++; |
|
2456 }else{ |
|
2457 break; |
|
2458 } |
|
2459 } |
|
2460 } |
|
2461 *tokenType = TOKEN_STRING; |
|
2462 return i + (c!=0); |
|
2463 } |
|
2464 case '[': { |
|
2465 for(i=1, c=z[0]; c!=']' && (c=z[i])!=0; i++){} |
|
2466 *tokenType = TOKEN_ID; |
|
2467 return i; |
|
2468 } |
|
2469 default: { |
|
2470 if( !IdChar(*z) ){ |
|
2471 break; |
|
2472 } |
|
2473 for(i=1; IdChar(z[i]); i++){} |
|
2474 *tokenType = TOKEN_ID; |
|
2475 return i; |
|
2476 } |
|
2477 } |
|
2478 *tokenType = TOKEN_PUNCT; |
|
2479 return 1; |
|
2480 } |
|
2481 |
|
2482 /* |
|
2483 ** A token extracted from a string is an instance of the following |
|
2484 ** structure. |
|
2485 */ |
|
2486 typedef struct Token { |
|
2487 const char *z; /* Pointer to token text. Not '\000' terminated */ |
|
2488 short int n; /* Length of the token text in bytes. */ |
|
2489 } Token; |
|
2490 |
|
2491 /* |
|
2492 ** Given an input string (which is really one of the argv[] parameters
|
2493 ** passed into xConnect or xCreate) split the string up into tokens. |
|
2494 ** Return an array of pointers to '\000' terminated strings, one string |
|
2495 ** for each non-whitespace token. |
|
2496 ** |
|
2497 ** The returned array is terminated by a single NULL pointer. |
|
2498 ** |
|
2499 ** Space to hold the returned array is obtained from a single |
|
2500 ** sqlite3_malloc() and should be freed by passing the return value to sqlite3_free().
|
2501 ** The individual strings within the token list are all a part of |
|
2502 ** the single memory allocation and will all be freed at once. |
|
2503 */ |
|
2504 static char **tokenizeString(const char *z, int *pnToken){ |
|
2505 int nToken = 0; |
|
2506 Token *aToken = sqlite3_malloc( strlen(z) * sizeof(aToken[0]) ); |
|
2507 int n = 1; |
|
2508 int e, i; |
|
2509 int totalSize = 0; |
|
2510 char **azToken; |
|
2511 char *zCopy; |
|
2512 while( n>0 ){ |
|
2513 n = getToken(z, &e); |
|
2514 if( e!=TOKEN_SPACE ){ |
|
2515 aToken[nToken].z = z; |
|
2516 aToken[nToken].n = n; |
|
2517 nToken++; |
|
2518 totalSize += n+1; |
|
2519 } |
|
2520 z += n; |
|
2521 } |
|
2522 azToken = (char**)sqlite3_malloc( nToken*sizeof(char*) + totalSize ); |
|
2523 zCopy = (char*)&azToken[nToken]; |
|
2524 nToken--; |
|
2525 for(i=0; i<nToken; i++){ |
|
2526 azToken[i] = zCopy; |
|
2527 n = aToken[i].n; |
|
2528 memcpy(zCopy, aToken[i].z, n); |
|
2529 zCopy[n] = 0; |
|
2530 zCopy += n+1; |
|
2531 } |
|
2532 azToken[nToken] = 0; |
|
2533 sqlite3_free(aToken); |
|
2534 *pnToken = nToken; |
|
2535 return azToken; |
|
2536 } |
|
2537 |
|
2538 /* |
|
2539 ** Convert an SQL-style quoted string into a normal string by removing |
|
2540 ** the quote characters. The conversion is done in-place. If the |
|
2541 ** input does not begin with a quote character, then this routine |
|
2542 ** is a no-op. |
|
2543 ** |
|
2544 ** Examples: |
|
2545 ** |
|
2546 ** "abc" becomes abc |
|
2547 ** 'xyz' becomes xyz |
|
2548 ** [pqr] becomes pqr |
|
2549 ** `mno` becomes mno |
|
2550 */ |
|
2551 static void dequoteString(char *z){ |
|
2552 int quote; |
|
2553 int i, j; |
|
2554 if( z==0 ) return; |
|
2555 quote = z[0]; |
|
2556 switch( quote ){ |
|
2557 case '\'': break; |
|
2558 case '"': break; |
|
2559 case '`': break; /* For MySQL compatibility */ |
|
2560 case '[': quote = ']'; break; /* For MS SqlServer compatibility */ |
|
2561 default: return; |
|
2562 } |
|
2563 for(i=1, j=0; z[i]; i++){ |
|
2564 if( z[i]==quote ){ |
|
2565 if( z[i+1]==quote ){ |
|
2566 z[j++] = quote; |
|
2567 i++; |
|
2568 }else{ |
|
2569 z[j++] = 0; |
|
2570 break; |
|
2571 } |
|
2572 }else{ |
|
2573 z[j++] = z[i]; |
|
2574 } |
|
2575 } |
|
2576 } |
|
2577 |
|
2578 /* |
|
2579 ** The input azIn is a NULL-terminated list of tokens. Remove the first |
|
2580 ** token and all punctuation tokens. Remove the quotes from |
|
2581 ** around string literal tokens. |
|
2582 ** |
|
2583 ** Example: |
|
2584 ** |
|
2585 ** input: tokenize chinese ( 'simplified' , 'mixed' )

2586 ** output: chinese simplified mixed
|
2587 ** |
|
2588 ** Another example: |
|
2589 ** |
|
2590 ** input: delimiters ( '[' , ']' , '...' ) |
|
2591 ** output: [ ] ... |
|
2592 */ |
|
2593 static void tokenListToIdList(char **azIn){ |
|
2594 int i, j; |
|
2595 if( azIn ){ |
|
2596 for(i=0, j=-1; azIn[i]; i++){ |
|
2597 if( safe_isalnum(azIn[i][0]) || azIn[i][1] ){ |
|
2598 dequoteString(azIn[i]); |
|
2599 if( j>=0 ){ |
|
2600 azIn[j] = azIn[i]; |
|
2601 } |
|
2602 j++; |
|
2603 } |
|
2604 } |
|
2605 azIn[j] = 0; |
|
2606 } |
|
2607 } |
|
2608 |
|
2609 |
|
2610 /* |
|
2611 ** Find the first alphanumeric token in the string zIn. Null-terminate |
|
2612 ** this token. Remove any quotation marks. And return a pointer to |
|
2613 ** the result. |
|
2614 */ |
|
2615 static char *firstToken(char *zIn, char **pzTail){ |
|
2616 int n, ttype; |
|
2617 while(1){ |
|
2618 n = getToken(zIn, &ttype); |
|
2619 if( ttype==TOKEN_SPACE ){ |
|
2620 zIn += n; |
|
2621 }else if( ttype==TOKEN_EOF ){ |
|
2622 *pzTail = zIn; |
|
2623 return 0; |
|
2624 }else{ |
|
2625 zIn[n] = 0; |
|
2626 *pzTail = &zIn[1]; |
|
2627 dequoteString(zIn); |
|
2628 return zIn; |
|
2629 } |
|
2630 } |
|
2631 /*NOTREACHED*/ |
|
2632 } |
|
2633 |
|
2634 /* Return true if... |
|
2635 ** |
|
2636 ** * s begins with the string t, ignoring case |
|
2637 ** * s is longer than t |
|
2638 ** * The first character of s beyond t is neither alphanumeric nor '_'
|
2639 ** |
|
2640 ** Ignore leading space in *s. |
|
2641 ** |
|
2642 ** To put it another way, return true if the first token of |
|
2643 ** s[] is t[]. |
|
2644 */ |
|
2645 static int startsWith(const char *s, const char *t){ |
|
2646 while( safe_isspace(*s) ){ s++; } |
|
2647 while( *t ){ |
|
2648 if( safe_tolower(*s++)!=safe_tolower(*t++) ) return 0; |
|
2649 } |
|
2650 return *s!='_' && !safe_isalnum(*s); |
|
2651 } |
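/* Examples (illustrative): startsWith("  tokenize simple", "tokenize")
** and startsWith("tokenize(porter)", "tokenize") are both true;
** startsWith("tokenizer x", "tokenize") is false because the first
** token of s extends past t.
*/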
|
2652 |
|
2653 /* |
|
2654 ** An instance of this structure defines the "spec" of a |
|
2655 ** full text index. This structure is populated by parseSpec |
|
2656 ** and used by fulltextConnect and fulltextCreate.
|
2657 */ |
|
2658 typedef struct TableSpec { |
|
2659 const char *zDb; /* Logical database name */ |
|
2660 const char *zName; /* Name of the full-text index */ |
|
2661 int nColumn; /* Number of columns to be indexed */ |
|
2662 char **azColumn; /* Original names of columns to be indexed */ |
|
2663 char **azContentColumn; /* Column names for %_content */ |
|
2664 char **azTokenizer; /* Name of tokenizer and its arguments */ |
|
2665 } TableSpec; |
|
2666 |
|
2667 /* |
|
2668 ** Reclaim all of the memory used by a TableSpec |
|
2669 */ |
|
2670 static void clearTableSpec(TableSpec *p) { |
|
2671 sqlite3_free(p->azColumn); |
|
2672 sqlite3_free(p->azContentColumn); |
|
2673 sqlite3_free(p->azTokenizer); |
|
2674 } |
|
2675 |
|
2676 /* Parse a CREATE VIRTUAL TABLE statement, which looks like this: |
|
2677 * |
|
2678 * CREATE VIRTUAL TABLE email |
|
2679 * USING fts2(subject, body, tokenize mytokenizer(myarg)) |
|
2680 * |
|
2681 * We return parsed information in a TableSpec structure. |
|
2682 * |
|
2683 */ |
|
2684 static int parseSpec(TableSpec *pSpec, int argc, const char *const*argv, |
|
2685 char**pzErr){ |
|
2686 int i, n; |
|
2687 char *z, *zDummy; |
|
2688 char **azArg; |
|
2689 const char *zTokenizer = 0; /* argv[] entry describing the tokenizer */ |
|
2690 |
|
2691 assert( argc>=3 ); |
|
2692 /* Current interface: |
|
2693 ** argv[0] - module name |
|
2694 ** argv[1] - database name |
|
2695 ** argv[2] - table name |
|
2696 ** argv[3..] - columns, optionally followed by tokenizer specification |
|
2697 ** and snippet delimiters specification. |
|
2698 */ |
|
2699 |
|
2700 /* Make a copy of the complete argv[][] array in a single allocation. |
|
2701 ** The argv[][] array is read-only and transient. We can write to the |
|
2702 ** copy in order to modify things and the copy is persistent. |
|
2703 */ |
|
2704 CLEAR(pSpec); |
|
2705 for(i=n=0; i<argc; i++){ |
|
2706 n += strlen(argv[i]) + 1; |
|
2707 } |
|
2708 azArg = sqlite3_malloc( sizeof(char*)*argc + n ); |
|
2709 if( azArg==0 ){ |
|
2710 return SQLITE_NOMEM; |
|
2711 } |
|
2712 z = (char*)&azArg[argc]; |
|
2713 for(i=0; i<argc; i++){ |
|
2714 azArg[i] = z; |
|
2715 strcpy(z, argv[i]); |
|
2716 z += strlen(z)+1; |
|
2717 } |
|
2718 |
|
2719 /* Identify the column names and the tokenizer and delimiter arguments |
|
2720 ** in the argv[][] array. |
|
2721 */ |
|
2722 pSpec->zDb = azArg[1]; |
|
2723 pSpec->zName = azArg[2]; |
|
2724 pSpec->nColumn = 0; |
|
2725 pSpec->azColumn = azArg; |
|
2726 zTokenizer = "tokenize simple"; |
|
2727 for(i=3; i<argc; ++i){ |
|
2728 if( startsWith(azArg[i],"tokenize") ){ |
|
2729 zTokenizer = azArg[i]; |
|
2730 }else{ |
|
2731 z = azArg[pSpec->nColumn] = firstToken(azArg[i], &zDummy); |
|
2732 pSpec->nColumn++; |
|
2733 } |
|
2734 } |
|
2735 if( pSpec->nColumn==0 ){ |
|
2736 azArg[0] = "content"; |
|
2737 pSpec->nColumn = 1; |
|
2738 } |
|
2739 |
|
2740 /* |
|
2741 ** Construct the list of content column names. |
|
2742 ** |
|
2743 ** Each content column name will be of the form cNNAAAA |
|
2744 ** where NN is the column number and AAAA is the sanitized |
|
2745 ** column name. "sanitized" means that special characters are |
|
2746 ** converted to "_". The cNN prefix guarantees that all column |
|
2747 ** names are unique. |
|
2748 ** |
|
2749 ** The AAAA suffix is not strictly necessary. It is included |
|
2750 ** for the convenience of people who might examine the generated |
|
2751 ** %_content table and wonder what the columns are used for. |
|
2752 */ |
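  /* For example (illustrative): declared columns ("subject", "reply-to")
  ** become content columns "c0subject" and "c1reply_to".
  */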
|
2753 pSpec->azContentColumn = sqlite3_malloc( pSpec->nColumn * sizeof(char *) ); |
|
2754 if( pSpec->azContentColumn==0 ){ |
|
2755 clearTableSpec(pSpec); |
|
2756 return SQLITE_NOMEM; |
|
2757 } |
|
2758 for(i=0; i<pSpec->nColumn; i++){ |
|
2759 char *p; |
|
2760 pSpec->azContentColumn[i] = sqlite3_mprintf("c%d%s", i, azArg[i]); |
|
2761 for (p = pSpec->azContentColumn[i]; *p ; ++p) { |
|
2762 if( !safe_isalnum(*p) ) *p = '_'; |
|
2763 } |
|
2764 } |
|
2765 |
|
2766 /* |
|
2767 ** Parse the tokenizer specification string. |
|
2768 */ |
|
2769 pSpec->azTokenizer = tokenizeString(zTokenizer, &n); |
|
2770 tokenListToIdList(pSpec->azTokenizer); |
|
2771 |
|
2772 return SQLITE_OK; |
|
2773 } |
|
2774 |
|
2775 /* |
|
2776 ** Generate a CREATE TABLE statement that describes the schema of |
|
2777 ** the virtual table. Return a pointer to this schema string. |
|
2778 ** |
|
2779 ** Space is obtained from sqlite3_mprintf() and should be freed |
|
2780 ** using sqlite3_free(). |
|
2781 */ |
|
2782 static char *fulltextSchema( |
|
2783 int nColumn, /* Number of columns */ |
|
2784 const char *const* azColumn, /* List of columns */ |
|
2785 const char *zTableName /* Name of the table */ |
|
2786 ){ |
|
2787 int i; |
|
2788 char *zSchema, *zNext; |
|
2789 const char *zSep = "("; |
|
2790 zSchema = sqlite3_mprintf("CREATE TABLE x"); |
|
2791 for(i=0; i<nColumn; i++){ |
|
2792 zNext = sqlite3_mprintf("%s%s%Q", zSchema, zSep, azColumn[i]); |
|
2793 sqlite3_free(zSchema); |
|
2794 zSchema = zNext; |
|
2795 zSep = ","; |
|
2796 } |
|
2797 zNext = sqlite3_mprintf("%s,%Q)", zSchema, zTableName); |
|
2798 sqlite3_free(zSchema); |
|
2799 return zNext; |
|
2800 } |
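/* For example (illustrative): nColumn==2, azColumn[] of {"subject",
** "body"}, and zTableName "email" produce:
**
**   CREATE TABLE x('subject','body','email')
**
** The final column, named after the table itself, is the one
** conventionally used on the left of MATCH to search all columns.
*/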
|
2801 |
|
2802 /* |
|
2803 ** Build a new sqlite3_vtab structure that will describe the |
|
2804 ** fulltext index defined by spec. |
|
2805 */ |
|
2806 static int constructVtab( |
|
2807 sqlite3 *db, /* The SQLite database connection */ |
|
2808 fts2Hash *pHash, /* Hash table containing tokenizers */ |
|
2809 TableSpec *spec, /* Parsed spec information from parseSpec() */ |
|
2810 sqlite3_vtab **ppVTab, /* Write the resulting vtab structure here */ |
|
2811 char **pzErr /* Write any error message here */ |
|
2812 ){ |
|
2813 int rc; |
|
2814 int n; |
|
2815 fulltext_vtab *v = 0; |
|
2816 const sqlite3_tokenizer_module *m = NULL; |
|
2817 char *schema; |
|
2818 |
|
2819 char const *zTok; /* Name of tokenizer to use for this fts table */ |
|
2820 int nTok; /* Length of zTok, including nul terminator */ |
|
2821 |
|
2822 v = (fulltext_vtab *) sqlite3_malloc(sizeof(fulltext_vtab)); |
|
2823 if( v==0 ) return SQLITE_NOMEM; |
|
2824 CLEAR(v); |
|
2825 /* sqlite will initialize v->base */ |
|
2826 v->db = db; |
|
2827 v->zDb = spec->zDb; /* Freed when azColumn is freed */ |
|
2828 v->zName = spec->zName; /* Freed when azColumn is freed */ |
|
2829 v->nColumn = spec->nColumn; |
|
2830 v->azContentColumn = spec->azContentColumn; |
|
2831 spec->azContentColumn = 0; |
|
2832 v->azColumn = spec->azColumn; |
|
2833 spec->azColumn = 0; |
|
2834 |
|
2835 if( spec->azTokenizer==0 ){ |
|
2836 rc = SQLITE_NOMEM; goto err;  /* destroy the partially-built vtab instead of leaking it */
|
2837 } |
|
2838 |
|
2839 zTok = spec->azTokenizer[0]; |
|
2840 if( !zTok ){ |
|
2841 zTok = "simple"; |
|
2842 } |
|
2843 nTok = strlen(zTok)+1; |
|
2844 |
|
2845 m = (sqlite3_tokenizer_module *)sqlite3Fts2HashFind(pHash, zTok, nTok); |
|
2846 if( !m ){ |
|
2847 *pzErr = sqlite3_mprintf("unknown tokenizer: %s", spec->azTokenizer[0]); |
|
2848 rc = SQLITE_ERROR; |
|
2849 goto err; |
|
2850 } |
|
2851 |
|
2852 for(n=0; spec->azTokenizer[n]; n++){} |
|
2853 if( n ){ |
|
2854 rc = m->xCreate(n-1, (const char*const*)&spec->azTokenizer[1], |
|
2855 &v->pTokenizer); |
|
2856 }else{ |
|
2857 rc = m->xCreate(0, 0, &v->pTokenizer); |
|
2858 } |
|
2859 if( rc!=SQLITE_OK ) goto err; |
|
2860 v->pTokenizer->pModule = m; |
|
2861 |
|
2862 /* TODO: verify the existence of backing tables foo_content, foo_term */ |
|
2863 |
|
2864 schema = fulltextSchema(v->nColumn, (const char*const*)v->azColumn, |
|
2865 spec->zName); |
|
2866 rc = sqlite3_declare_vtab(db, schema); |
|
2867 sqlite3_free(schema); |
|
2868 if( rc!=SQLITE_OK ) goto err; |
|
2869 |
|
2870 memset(v->pFulltextStatements, 0, sizeof(v->pFulltextStatements)); |
|
2871 |
|
2872 /* Indicate that the buffer is not live. */ |
|
2873 v->nPendingData = -1; |
|
2874 |
|
2875 *ppVTab = &v->base; |
|
2876 TRACE(("FTS2 Connect %p\n", v)); |
|
2877 |
|
2878 return rc; |
|
2879 |
|
2880 err: |
|
2881 fulltext_vtab_destroy(v); |
|
2882 return rc; |
|
2883 } |
|
2884 |
|
2885 static int fulltextConnect( |
|
2886 sqlite3 *db, |
|
2887 void *pAux, |
|
2888 int argc, const char *const*argv, |
|
2889 sqlite3_vtab **ppVTab, |
|
2890 char **pzErr |
|
2891 ){ |
|
2892 TableSpec spec; |
|
2893 int rc = parseSpec(&spec, argc, argv, pzErr); |
|
2894 if( rc!=SQLITE_OK ) return rc; |
|
2895 |
|
2896 rc = constructVtab(db, (fts2Hash *)pAux, &spec, ppVTab, pzErr); |
|
2897 clearTableSpec(&spec); |
|
2898 return rc; |
|
2899 } |
|
2900 |
|
2901 /* The %_content table holds the text of each document, with |
|
2902 ** the rowid used as the docid. |
|
2903 */ |
|
2904 /* TODO(shess) This comment needs elaboration to match the updated |
|
2905 ** code. Work it into the top-of-file comment at that time. |
|
2906 */ |
|
2907 static int fulltextCreate(sqlite3 *db, void *pAux, |
|
2908 int argc, const char * const *argv, |
|
2909 sqlite3_vtab **ppVTab, char **pzErr){ |
|
2910 int rc; |
|
2911 TableSpec spec; |
|
2912 StringBuffer schema; |
|
2913 TRACE(("FTS2 Create\n")); |
|
2914 |
|
2915 rc = parseSpec(&spec, argc, argv, pzErr); |
|
2916 if( rc!=SQLITE_OK ) return rc; |
|
2917 |
|
2918 initStringBuffer(&schema); |
|
2919 append(&schema, "CREATE TABLE %_content("); |
|
2920 appendList(&schema, spec.nColumn, spec.azContentColumn); |
|
2921 append(&schema, ")"); |
|
2922 rc = sql_exec(db, spec.zDb, spec.zName, stringBufferData(&schema)); |
|
2923 stringBufferDestroy(&schema); |
|
2924 if( rc!=SQLITE_OK ) goto out; |
|
2925 |
|
2926 rc = sql_exec(db, spec.zDb, spec.zName, |
|
2927 "create table %_segments(block blob);"); |
|
2928 if( rc!=SQLITE_OK ) goto out; |
|
2929 |
|
2930 rc = sql_exec(db, spec.zDb, spec.zName, |
|
2931 "create table %_segdir(" |
|
2932 " level integer," |
|
2933 " idx integer," |
|
2934 " start_block integer," |
|
2935 " leaves_end_block integer," |
|
2936 " end_block integer," |
|
2937 " root blob," |
|
2938 " primary key(level, idx)" |
|
2939 ");"); |
|
2940 if( rc!=SQLITE_OK ) goto out; |
|
2941 |
|
2942 rc = constructVtab(db, (fts2Hash *)pAux, &spec, ppVTab, pzErr); |
|
2943 |
|
2944 out: |
|
2945 clearTableSpec(&spec); |
|
2946 return rc; |
|
2947 } |
|
2948 |
|
2949 /* Decide how to handle an SQL query. */ |
|
2950 static int fulltextBestIndex(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ |
|
2951 int i; |
|
2952 TRACE(("FTS2 BestIndex\n")); |
|
2953 |
|
2954 for(i=0; i<pInfo->nConstraint; ++i){ |
|
2955 const struct sqlite3_index_constraint *pConstraint; |
|
2956 pConstraint = &pInfo->aConstraint[i]; |
|
2957 if( pConstraint->usable ) { |
|
2958 if( pConstraint->iColumn==-1 && |
|
2959 pConstraint->op==SQLITE_INDEX_CONSTRAINT_EQ ){ |
|
2960 pInfo->idxNum = QUERY_ROWID; /* lookup by rowid */ |
|
2961 TRACE(("FTS2 QUERY_ROWID\n")); |
|
2962 } else if( pConstraint->iColumn>=0 && |
|
2963 pConstraint->op==SQLITE_INDEX_CONSTRAINT_MATCH ){ |
|
2964 /* full-text search */ |
|
2965 pInfo->idxNum = QUERY_FULLTEXT + pConstraint->iColumn; |
|
2966 TRACE(("FTS2 QUERY_FULLTEXT %d\n", pConstraint->iColumn)); |
|
2967 } else continue; |
|
2968 |
|
2969 pInfo->aConstraintUsage[i].argvIndex = 1; |
|
2970 pInfo->aConstraintUsage[i].omit = 1; |
|
2971 |
|
2972 /* An arbitrary value for now. |
|
2973 * TODO: Perhaps rowid matches should be considered cheaper than |
|
2974 * full-text searches. */ |
|
2975 pInfo->estimatedCost = 1.0; |
|
2976 |
|
2977 return SQLITE_OK; |
|
2978 } |
|
2979 } |
|
2980 pInfo->idxNum = QUERY_GENERIC; |
|
2981 return SQLITE_OK; |
|
2982 } |
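/* Illustrative plans (the table name "email" is hypothetical, with
** "body" as column 1 of the virtual table):
**
**   SELECT * FROM email WHERE rowid=42;        -- idxNum==QUERY_ROWID
**   SELECT * FROM email WHERE body MATCH 'x';  -- idxNum==QUERY_FULLTEXT+1
**   SELECT * FROM email;                       -- idxNum==QUERY_GENERIC
*/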
|
2983 |
|
2984 static int fulltextDisconnect(sqlite3_vtab *pVTab){ |
|
2985 TRACE(("FTS2 Disconnect %p\n", pVTab)); |
|
2986 fulltext_vtab_destroy((fulltext_vtab *)pVTab); |
|
2987 return SQLITE_OK; |
|
2988 } |
|
2989 |
|
2990 static int fulltextDestroy(sqlite3_vtab *pVTab){ |
|
2991 fulltext_vtab *v = (fulltext_vtab *)pVTab; |
|
2992 int rc; |
|
2993 |
|
2994 TRACE(("FTS2 Destroy %p\n", pVTab)); |
|
2995 rc = sql_exec(v->db, v->zDb, v->zName, |
|
2996 "drop table if exists %_content;" |
|
2997 "drop table if exists %_segments;" |
|
2998 "drop table if exists %_segdir;" |
|
2999 ); |
|
3000 if( rc!=SQLITE_OK ) return rc; |
|
3001 |
|
3002 fulltext_vtab_destroy((fulltext_vtab *)pVTab); |
|
3003 return SQLITE_OK; |
|
3004 } |
|
3005 |
|
3006 static int fulltextOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){ |
|
3007 fulltext_cursor *c; |
|
3008 |
|
3009 c = (fulltext_cursor *) sqlite3_malloc(sizeof(fulltext_cursor)); |
|
3010 if( c ){ |
|
3011 memset(c, 0, sizeof(fulltext_cursor)); |
|
3012 /* sqlite will initialize c->base */ |
|
3013 *ppCursor = &c->base; |
|
3014 TRACE(("FTS2 Open %p: %p\n", pVTab, c)); |
|
3015 return SQLITE_OK; |
|
3016 }else{ |
|
3017 return SQLITE_NOMEM; |
|
3018 } |
|
3019 } |
|
3020 |
|
3021 |
|
3022 /* Free all of the dynamically allocated memory held by *q |
|
3023 */ |
|
3024 static void queryClear(Query *q){ |
|
3025 int i; |
|
3026 for(i = 0; i < q->nTerms; ++i){ |
|
3027 sqlite3_free(q->pTerms[i].pTerm); |
|
3028 } |
|
3029 sqlite3_free(q->pTerms); |
|
3030 CLEAR(q); |
|
3031 } |
|
3032 |
|
3033 /* Free all of the dynamically allocated memory held by the |
|
3034 ** Snippet |
|
3035 */ |
|
3036 static void snippetClear(Snippet *p){ |
|
3037 sqlite3_free(p->aMatch); |
|
3038 sqlite3_free(p->zOffset); |
|
3039 sqlite3_free(p->zSnippet); |
|
3040 CLEAR(p); |
|
3041 } |
|
3042 /* |
|
3043 ** Append a single entry to the p->aMatch[] log. |
|
3044 */ |
|
3045 static void snippetAppendMatch( |
|
3046 Snippet *p, /* Append the entry to this snippet */ |
|
3047 int iCol, int iTerm, /* The column and query term */ |
|
3048 int iStart, int nByte /* Offset and size of the match */ |
|
3049 ){ |
|
3050 int i; |
|
3051 struct snippetMatch *pMatch; |
|
3052 if( p->nMatch+1>=p->nAlloc ){ |
|
3053 p->nAlloc = p->nAlloc*2 + 10; |
|
3054 p->aMatch = sqlite3_realloc(p->aMatch, p->nAlloc*sizeof(p->aMatch[0]) ); |
|
3055 if( p->aMatch==0 ){ |
|
3056 p->nMatch = 0; |
|
3057 p->nAlloc = 0; |
|
3058 return; |
|
3059 } |
|
3060 } |
|
3061 i = p->nMatch++; |
|
3062 pMatch = &p->aMatch[i]; |
|
3063 pMatch->iCol = iCol; |
|
3064 pMatch->iTerm = iTerm; |
|
3065 pMatch->iStart = iStart; |
|
3066 pMatch->nByte = nByte; |
|
3067 } |
|
3068 |
|
3069 /* |
|
3070 ** Sizing information for the circular buffer used in snippetOffsetsOfColumn() |
|
3071 */ |
|
3072 #define FTS2_ROTOR_SZ (32) |
|
3073 #define FTS2_ROTOR_MASK (FTS2_ROTOR_SZ-1) |
|
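
/* Because FTS2_ROTOR_SZ is a power of two, (i & FTS2_ROTOR_MASK) is
** equivalent to (i % FTS2_ROTOR_SZ).  For example, with iRotor==37 the
** token lands in slot 37 & 31 == 5.  This is why the buffer size must
** remain a power of two.
*/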

/*
** Add entries to pSnippet->aMatch[] for every match that occurs against
** document zDoc[0..nDoc-1] which is stored in column iColumn.
*/
static void snippetOffsetsOfColumn(
  Query *pQuery,
  Snippet *pSnippet,
  int iColumn,
  const char *zDoc,
  int nDoc
){
  const sqlite3_tokenizer_module *pTModule;  /* The tokenizer module */
  sqlite3_tokenizer *pTokenizer;             /* The specific tokenizer */
  sqlite3_tokenizer_cursor *pTCursor;        /* Tokenizer cursor */
  fulltext_vtab *pVtab;                      /* The full text index */
  int nColumn;                               /* Number of columns in the index */
  const QueryTerm *aTerm;                    /* Query string terms */
  int nTerm;                                 /* Number of query string terms */
  int i, j;                                  /* Loop counters */
  int rc;                                    /* Return code */
  unsigned int match, prevMatch;             /* Phrase search bitmasks */
  const char *zToken;                        /* Next token from the tokenizer */
  int nToken;                                /* Size of zToken */
  int iBegin, iEnd, iPos;                    /* Offsets of beginning and end */

  /* The following variables keep a circular buffer of the last
  ** few tokens */
  unsigned int iRotor = 0;                   /* Index of current token */
  int iRotorBegin[FTS2_ROTOR_SZ];            /* Beginning offset of token */
  int iRotorLen[FTS2_ROTOR_SZ];              /* Length of token */

  pVtab = pQuery->pFts;
  nColumn = pVtab->nColumn;
  pTokenizer = pVtab->pTokenizer;
  pTModule = pTokenizer->pModule;
  rc = pTModule->xOpen(pTokenizer, zDoc, nDoc, &pTCursor);
  if( rc ) return;
  pTCursor->pTokenizer = pTokenizer;
  aTerm = pQuery->pTerms;
  nTerm = pQuery->nTerms;
  if( nTerm>=FTS2_ROTOR_SZ ){
    nTerm = FTS2_ROTOR_SZ - 1;
  }
  prevMatch = 0;
  while(1){
    rc = pTModule->xNext(pTCursor, &zToken, &nToken, &iBegin, &iEnd, &iPos);
    if( rc ) break;
    iRotorBegin[iRotor&FTS2_ROTOR_MASK] = iBegin;
    iRotorLen[iRotor&FTS2_ROTOR_MASK] = iEnd-iBegin;
    match = 0;
    for(i=0; i<nTerm; i++){
      int iCol;
      iCol = aTerm[i].iColumn;
      if( iCol>=0 && iCol<nColumn && iCol!=iColumn ) continue;
      if( aTerm[i].nTerm>nToken ) continue;
      if( !aTerm[i].isPrefix && aTerm[i].nTerm<nToken ) continue;
      assert( aTerm[i].nTerm<=nToken );
      if( memcmp(aTerm[i].pTerm, zToken, aTerm[i].nTerm) ) continue;
      if( aTerm[i].iPhrase>1 && (prevMatch & (1<<i))==0 ) continue;
      match |= 1<<i;
      if( i==nTerm-1 || aTerm[i+1].iPhrase==1 ){
        for(j=aTerm[i].iPhrase-1; j>=0; j--){
          int k = (iRotor-j) & FTS2_ROTOR_MASK;
          snippetAppendMatch(pSnippet, iColumn, i-j,
                             iRotorBegin[k], iRotorLen[k]);
        }
      }
    }
    prevMatch = match<<1;
    iRotor++;
  }
  pTModule->xClose(pTCursor);
}
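
/* A note on the phrase bitmasks above: bit i of "match" is set when
** query term i matches the current token.  Shifting the mask left by
** one before moving to the next token means that (prevMatch & (1<<i))
** tests whether term i-1 matched the immediately preceding token,
** which is exactly the condition a phrase continuation needs.  For
** the phrase "new york", "york" (term 1, iPhrase 2) is only accepted
** when "new" (term 0) matched the previous token.
*/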


/*
** Compute all offsets for the current row of the query.
** If the offsets have already been computed, this routine is a no-op.
*/
static void snippetAllOffsets(fulltext_cursor *p){
  int nColumn;
  int iColumn, i;
  int iFirst, iLast;
  fulltext_vtab *pFts;

  if( p->snippet.nMatch ) return;
  if( p->q.nTerms==0 ) return;
  pFts = p->q.pFts;
  nColumn = pFts->nColumn;
  iColumn = (p->iCursorType - QUERY_FULLTEXT);
  if( iColumn<0 || iColumn>=nColumn ){
    iFirst = 0;
    iLast = nColumn-1;
  }else{
    iFirst = iColumn;
    iLast = iColumn;
  }
  for(i=iFirst; i<=iLast; i++){
    const char *zDoc;
    int nDoc;
    zDoc = (const char*)sqlite3_column_text(p->pStmt, i+1);
    nDoc = sqlite3_column_bytes(p->pStmt, i+1);
    snippetOffsetsOfColumn(&p->q, &p->snippet, i, zDoc, nDoc);
  }
}

/*
** Convert the information in the aMatch[] array of the snippet
** into the string zOffset[0..nOffset-1].
*/
static void snippetOffsetText(Snippet *p){
  int i;
  int cnt = 0;
  StringBuffer sb;
  char zBuf[200];
  if( p->zOffset ) return;
  initStringBuffer(&sb);
  for(i=0; i<p->nMatch; i++){
    struct snippetMatch *pMatch = &p->aMatch[i];
    zBuf[0] = ' ';
    sqlite3_snprintf(sizeof(zBuf)-1, &zBuf[cnt>0], "%d %d %d %d",
        pMatch->iCol, pMatch->iTerm, pMatch->iStart, pMatch->nByte);
    append(&sb, zBuf);
    cnt++;
  }
  p->zOffset = stringBufferData(&sb);
  p->nOffset = stringBufferLength(&sb);
}
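
/* The resulting string is a space-separated list of quadruples
** "iCol iTerm iStart nByte", one per match.  For instance, a row whose
** column 0 matches term 0 at byte 3 (4 bytes long) and term 1 at byte
** 10 (5 bytes long) yields "0 0 3 4 0 1 10 5".
*/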

/*
** zDoc[0..nDoc-1] is a phrase of text.  aMatch[0..nMatch-1] are a set
** of matching words, some of which might be in zDoc.  zDoc is column
** number iCol.
**
** iBreak is a suggested spot in zDoc where we could begin or end an
** excerpt.  Return a value similar to iBreak but possibly adjusted
** to be a little left or right so that the break point is better.
*/
static int wordBoundary(
  int iBreak,                   /* The suggested break point */
  const char *zDoc,             /* Document text */
  int nDoc,                     /* Number of bytes in zDoc[] */
  struct snippetMatch *aMatch,  /* Matching words */
  int nMatch,                   /* Number of entries in aMatch[] */
  int iCol                      /* The column number for zDoc[] */
){
  int i;
  if( iBreak<=10 ){
    return 0;
  }
  if( iBreak>=nDoc-10 ){
    return nDoc;
  }
  for(i=0; i<nMatch && aMatch[i].iCol<iCol; i++){}
  while( i<nMatch && aMatch[i].iStart+aMatch[i].nByte<iBreak ){ i++; }
  if( i<nMatch ){
    if( aMatch[i].iStart<iBreak+10 ){
      return aMatch[i].iStart;
    }
    if( i>0 && aMatch[i-1].iStart+aMatch[i-1].nByte>=iBreak ){
      return aMatch[i-1].iStart;
    }
  }
  for(i=1; i<=10; i++){
    if( safe_isspace(zDoc[iBreak-i]) ){
      return iBreak - i + 1;
    }
    if( safe_isspace(zDoc[iBreak+i]) ){
      return iBreak + i + 1;
    }
  }
  return iBreak;
}



/*
** Allowed values for Snippet.aMatch[].snStatus
*/
#define SNIPPET_IGNORE  0   /* It is ok to omit this match from the snippet */
#define SNIPPET_DESIRED 1   /* We want to include this match in the snippet */

/*
** Generate the text of a snippet.
*/
static void snippetText(
  fulltext_cursor *pCursor,   /* The cursor we need the snippet for */
  const char *zStartMark,     /* Markup to appear before each match */
  const char *zEndMark,       /* Markup to appear after each match */
  const char *zEllipsis       /* Ellipsis mark */
){
  int i, j;
  struct snippetMatch *aMatch;
  int nMatch;
  int nDesired;
  StringBuffer sb;
  int tailCol;
  int tailOffset;
  int iCol;
  int nDoc;
  const char *zDoc;
  int iStart, iEnd;
  int tailEllipsis = 0;
  int iMatch;


  sqlite3_free(pCursor->snippet.zSnippet);
  pCursor->snippet.zSnippet = 0;
  aMatch = pCursor->snippet.aMatch;
  nMatch = pCursor->snippet.nMatch;
  initStringBuffer(&sb);

  for(i=0; i<nMatch; i++){
    aMatch[i].snStatus = SNIPPET_IGNORE;
  }
  nDesired = 0;
  for(i=0; i<pCursor->q.nTerms; i++){
    for(j=0; j<nMatch; j++){
      if( aMatch[j].iTerm==i ){
        aMatch[j].snStatus = SNIPPET_DESIRED;
        nDesired++;
        break;
      }
    }
  }

  iMatch = 0;
  tailCol = -1;
  tailOffset = 0;
  for(i=0; i<nMatch && nDesired>0; i++){
    if( aMatch[i].snStatus!=SNIPPET_DESIRED ) continue;
    nDesired--;
    iCol = aMatch[i].iCol;
    zDoc = (const char*)sqlite3_column_text(pCursor->pStmt, iCol+1);
    nDoc = sqlite3_column_bytes(pCursor->pStmt, iCol+1);
    iStart = aMatch[i].iStart - 40;
    iStart = wordBoundary(iStart, zDoc, nDoc, aMatch, nMatch, iCol);
    if( iStart<=10 ){
      iStart = 0;
    }
    if( iCol==tailCol && iStart<=tailOffset+20 ){
      iStart = tailOffset;
    }
    if( (iCol!=tailCol && tailCol>=0) || iStart!=tailOffset ){
      trimWhiteSpace(&sb);
      appendWhiteSpace(&sb);
      append(&sb, zEllipsis);
      appendWhiteSpace(&sb);
    }
    iEnd = aMatch[i].iStart + aMatch[i].nByte + 40;
    iEnd = wordBoundary(iEnd, zDoc, nDoc, aMatch, nMatch, iCol);
    if( iEnd>=nDoc-10 ){
      iEnd = nDoc;
      tailEllipsis = 0;
    }else{
      tailEllipsis = 1;
    }
    while( iMatch<nMatch && aMatch[iMatch].iCol<iCol ){ iMatch++; }
    while( iStart<iEnd ){
      while( iMatch<nMatch && aMatch[iMatch].iStart<iStart
             && aMatch[iMatch].iCol<=iCol ){
        iMatch++;
      }
      if( iMatch<nMatch && aMatch[iMatch].iStart<iEnd
          && aMatch[iMatch].iCol==iCol ){
        nappend(&sb, &zDoc[iStart], aMatch[iMatch].iStart - iStart);
        iStart = aMatch[iMatch].iStart;
        append(&sb, zStartMark);
        nappend(&sb, &zDoc[iStart], aMatch[iMatch].nByte);
        append(&sb, zEndMark);
        iStart += aMatch[iMatch].nByte;
        for(j=iMatch+1; j<nMatch; j++){
          if( aMatch[j].iTerm==aMatch[iMatch].iTerm
              && aMatch[j].snStatus==SNIPPET_DESIRED ){
            nDesired--;
            aMatch[j].snStatus = SNIPPET_IGNORE;
          }
        }
      }else{
        nappend(&sb, &zDoc[iStart], iEnd - iStart);
        iStart = iEnd;
      }
    }
    tailCol = iCol;
    tailOffset = iEnd;
  }
  trimWhiteSpace(&sb);
  if( tailEllipsis ){
    appendWhiteSpace(&sb);
    append(&sb, zEllipsis);
  }
  pCursor->snippet.zSnippet = stringBufferData(&sb);
  pCursor->snippet.nSnippet = stringBufferLength(&sb);
}
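
/* As an illustration, with zStartMark="<b>", zEndMark="</b>" and
** zEllipsis="...", a query for "sqlite" against a long document might
** produce a snippet like:
**
**   ...an introduction to the <b>sqlite</b> library and its virtual...
*/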


/*
** Close the cursor.  For additional information see the documentation
** on the xClose method of the virtual table interface.
*/
static int fulltextClose(sqlite3_vtab_cursor *pCursor){
  fulltext_cursor *c = (fulltext_cursor *) pCursor;
  TRACE(("FTS2 Close %p\n", c));
  sqlite3_finalize(c->pStmt);
  queryClear(&c->q);
  snippetClear(&c->snippet);
  if( c->result.nData!=0 ) dlrDestroy(&c->reader);
  dataBufferDestroy(&c->result);
  sqlite3_free(c);
  return SQLITE_OK;
}

static int fulltextNext(sqlite3_vtab_cursor *pCursor){
  fulltext_cursor *c = (fulltext_cursor *) pCursor;
  int rc;

  TRACE(("FTS2 Next %p\n", pCursor));
  snippetClear(&c->snippet);
  if( c->iCursorType < QUERY_FULLTEXT ){
    /* TODO(shess) Handle SQLITE_SCHEMA AND SQLITE_BUSY. */
    rc = sqlite3_step(c->pStmt);
    switch( rc ){
      case SQLITE_ROW:
        c->eof = 0;
        return SQLITE_OK;
      case SQLITE_DONE:
        c->eof = 1;
        return SQLITE_OK;
      default:
        c->eof = 1;
        return rc;
    }
  } else {  /* full-text query */
    rc = sqlite3_reset(c->pStmt);
    if( rc!=SQLITE_OK ) return rc;

    if( c->result.nData==0 || dlrAtEnd(&c->reader) ){
      c->eof = 1;
      return SQLITE_OK;
    }
    rc = sqlite3_bind_int64(c->pStmt, 1, dlrDocid(&c->reader));
    dlrStep(&c->reader);
    if( rc!=SQLITE_OK ) return rc;
    /* TODO(shess) Handle SQLITE_SCHEMA AND SQLITE_BUSY. */
    rc = sqlite3_step(c->pStmt);
    if( rc==SQLITE_ROW ){   /* the case we expect */
      c->eof = 0;
      return SQLITE_OK;
    }
    /* an error occurred; abort */
    return rc==SQLITE_DONE ? SQLITE_ERROR : rc;
  }
}


/* TODO(shess) If we pushed LeafReader to the top of the file, or to
** another file, term_select() could be pushed above
** docListOfTerm().
*/
static int termSelect(fulltext_vtab *v, int iColumn,
                      const char *pTerm, int nTerm, int isPrefix,
                      DocListType iType, DataBuffer *out);

/* Return a DocList corresponding to the query term *pTerm.  If *pTerm
** is the first term of a phrase query, go ahead and evaluate the phrase
** query and return the doclist for the entire phrase query.
**
** The resulting DL_DOCIDS doclist is stored in pResult, which is
** overwritten.
*/
static int docListOfTerm(
  fulltext_vtab *v,    /* The full text index */
  int iColumn,         /* column to restrict to.  No restriction if >=nColumn */
  QueryTerm *pQTerm,   /* Term we are looking for, or 1st term of a phrase */
  DataBuffer *pResult  /* Write the result here */
){
  DataBuffer left, right, new;
  int i, rc;

  /* No phrase search if no position info. */
  assert( pQTerm->nPhrase==0 || DL_DEFAULT!=DL_DOCIDS );

  /* This code should never be called with buffered updates. */
  assert( v->nPendingData<0 );

  dataBufferInit(&left, 0);
  rc = termSelect(v, iColumn, pQTerm->pTerm, pQTerm->nTerm, pQTerm->isPrefix,
                  0<pQTerm->nPhrase ? DL_POSITIONS : DL_DOCIDS, &left);
  if( rc ) return rc;
  for(i=1; i<=pQTerm->nPhrase && left.nData>0; i++){
    dataBufferInit(&right, 0);
    rc = termSelect(v, iColumn, pQTerm[i].pTerm, pQTerm[i].nTerm,
                    pQTerm[i].isPrefix, DL_POSITIONS, &right);
    if( rc ){
      dataBufferDestroy(&left);
      return rc;
    }
    dataBufferInit(&new, 0);
    docListPhraseMerge(left.pData, left.nData, right.pData, right.nData,
                       i<pQTerm->nPhrase ? DL_POSITIONS : DL_DOCIDS, &new);
    dataBufferDestroy(&left);
    dataBufferDestroy(&right);
    left = new;
  }
  *pResult = left;
  return SQLITE_OK;
}
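
/* For example, evaluating the phrase query "new york" selects the
** positioned doclist for "new" into "left", then merges it with the
** positioned doclist for "york"; docListPhraseMerge() keeps only the
** docids where "york" occurs at the position immediately after a
** "new".  The final merge requests DL_DOCIDS, since positions are no
** longer needed once the phrase is fully matched.
*/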

/* Add a new term pTerm[0..nTerm-1] to the query *q.
*/
static void queryAdd(Query *q, const char *pTerm, int nTerm){
  QueryTerm *t;
  ++q->nTerms;
  q->pTerms = sqlite3_realloc(q->pTerms, q->nTerms * sizeof(q->pTerms[0]));
  if( q->pTerms==0 ){
    q->nTerms = 0;
    return;
  }
  t = &q->pTerms[q->nTerms - 1];
  CLEAR(t);
  t->pTerm = sqlite3_malloc(nTerm+1);
  memcpy(t->pTerm, pTerm, nTerm);
  t->pTerm[nTerm] = 0;
  t->nTerm = nTerm;
  t->isOr = q->nextIsOr;
  t->isPrefix = 0;
  q->nextIsOr = 0;
  t->iColumn = q->nextColumn;
  q->nextColumn = q->dfltColumn;
}

/*
** Check to see if the string zToken[0...nToken-1] matches any
** column name in the virtual table.   If it does,
** return the zero-indexed column number.  If not, return -1.
*/
static int checkColumnSpecifier(
  fulltext_vtab *pVtab,    /* The virtual table */
  const char *zToken,      /* Text of the token */
  int nToken               /* Number of characters in the token */
){
  int i;
  for(i=0; i<pVtab->nColumn; i++){
    if( memcmp(pVtab->azColumn[i], zToken, nToken)==0
        && pVtab->azColumn[i][nToken]==0 ){
      return i;
    }
  }
  return -1;
}

/*
** Parse the text at pSegment[0..nSegment-1].  Add additional terms
** to the query being assembled in pQuery.
**
** inPhrase is true if pSegment[0..nSegment-1] is contained within
** double-quotes.  If inPhrase is true, then the first term
** is marked with the number of terms in the phrase less one and
** OR and "-" syntax is ignored.  If inPhrase is false, then every
** term found is marked with nPhrase=0 and OR and "-" syntax is significant.
*/
static int tokenizeSegment(
  sqlite3_tokenizer *pTokenizer,          /* The tokenizer to use */
  const char *pSegment, int nSegment,     /* Query expression being parsed */
  int inPhrase,                           /* True if within "..." */
  Query *pQuery                           /* Append results here */
){
  const sqlite3_tokenizer_module *pModule = pTokenizer->pModule;
  sqlite3_tokenizer_cursor *pCursor;
  int firstIndex = pQuery->nTerms;
  int iCol;
  int nTerm = 1;

  int rc = pModule->xOpen(pTokenizer, pSegment, nSegment, &pCursor);
  if( rc!=SQLITE_OK ) return rc;
  pCursor->pTokenizer = pTokenizer;

  while( 1 ){
    const char *pToken;
    int nToken, iBegin, iEnd, iPos;

    rc = pModule->xNext(pCursor,
                        &pToken, &nToken,
                        &iBegin, &iEnd, &iPos);
    if( rc!=SQLITE_OK ) break;
    if( !inPhrase &&
        pSegment[iEnd]==':' &&
        (iCol = checkColumnSpecifier(pQuery->pFts, pToken, nToken))>=0 ){
      pQuery->nextColumn = iCol;
      continue;
    }
    if( !inPhrase && pQuery->nTerms>0 && nToken==2
        && pSegment[iBegin]=='O' && pSegment[iBegin+1]=='R' ){
      pQuery->nextIsOr = 1;
      continue;
    }
    queryAdd(pQuery, pToken, nToken);
    if( !inPhrase && iBegin>0 && pSegment[iBegin-1]=='-' ){
      pQuery->pTerms[pQuery->nTerms-1].isNot = 1;
    }
    if( iEnd<nSegment && pSegment[iEnd]=='*' ){
      pQuery->pTerms[pQuery->nTerms-1].isPrefix = 1;
    }
    pQuery->pTerms[pQuery->nTerms-1].iPhrase = nTerm;
    if( inPhrase ){
      nTerm++;
    }
  }

  if( inPhrase && pQuery->nTerms>firstIndex ){
    pQuery->pTerms[firstIndex].nPhrase = pQuery->nTerms - firstIndex - 1;
  }

  return pModule->xClose(pCursor);
}

/* Parse a query string, yielding a Query object pQuery.
**
** The calling function will need to queryClear() to clean up
** the dynamically allocated memory held by pQuery.
*/
static int parseQuery(
  fulltext_vtab *v,        /* The fulltext index */
  const char *zInput,      /* Input text of the query string */
  int nInput,              /* Size of the input text */
  int dfltColumn,          /* Default column of the index to match against */
  Query *pQuery            /* Write the parse results here. */
){
  int iInput, inPhrase = 0;

  if( zInput==0 ) nInput = 0;
  if( nInput<0 ) nInput = strlen(zInput);
  pQuery->nTerms = 0;
  pQuery->pTerms = NULL;
  pQuery->nextIsOr = 0;
  pQuery->nextColumn = dfltColumn;
  pQuery->dfltColumn = dfltColumn;
  pQuery->pFts = v;

  for(iInput=0; iInput<nInput; ++iInput){
    int i;
    for(i=iInput; i<nInput && zInput[i]!='"'; ++i){}
    if( i>iInput ){
      tokenizeSegment(v->pTokenizer, zInput+iInput, i-iInput, inPhrase,
                      pQuery);
    }
    iInput = i;
    if( i<nInput ){
      assert( zInput[i]=='"' );
      inPhrase = !inPhrase;
    }
  }

  if( inPhrase ){
    /* unmatched quote */
    queryClear(pQuery);
    return SQLITE_ERROR;
  }
  return SQLITE_OK;
}
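
/* A worked example of the syntax handled above.  Parsing the query
**
**   title:sqlite -broken "full text" OR search*
**
** yields five QueryTerms: "sqlite" restricted to the "title" column
** (assuming such a column exists); "broken" with isNot set; "full"
** with nPhrase==1 followed by "text" (a two-term phrase from the
** quoted segment); and the prefix term "search" with isPrefix and
** isOr set.
*/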

/* TODO(shess) Refactor the code to remove this forward decl. */
static int flushPendingTerms(fulltext_vtab *v);

/* Perform a full-text query using the search expression in
** zInput[0..nInput-1].  Return a list of matching documents
** in pResult.
**
** Queries must match column iColumn.  Or if iColumn>=nColumn
** they are allowed to match against any column.
*/
static int fulltextQuery(
  fulltext_vtab *v,      /* The full text index */
  int iColumn,           /* Match against this column by default */
  const char *zInput,    /* The query string */
  int nInput,            /* Number of bytes in zInput[] */
  DataBuffer *pResult,   /* Write the result doclist here */
  Query *pQuery          /* Put parsed query string here */
){
  int i, iNext, rc;
  DataBuffer left, right, or, new;
  int nNot = 0;
  QueryTerm *aTerm;

  /* TODO(shess) Instead of flushing pendingTerms, we could query for
  ** the relevant term and merge the doclist into what we receive from
  ** the database.  Wait and see if this is a common issue, first.
  **
  ** A good reason not to flush is to not generate update-related
  ** error codes from here.
  */

  /* Flush any buffered updates before executing the query. */
  rc = flushPendingTerms(v);
  if( rc!=SQLITE_OK ) return rc;

  /* TODO(shess) I think that the queryClear() calls below are not
  ** necessary, because fulltextClose() already clears the query.
  */
  rc = parseQuery(v, zInput, nInput, iColumn, pQuery);
  if( rc!=SQLITE_OK ) return rc;

  /* Empty or NULL queries return no results. */
  if( pQuery->nTerms==0 ){
    dataBufferInit(pResult, 0);
    return SQLITE_OK;
  }

  /* Merge AND terms. */
  /* TODO(shess) I think we can early-exit if( i>nNot && left.nData==0 ). */
  aTerm = pQuery->pTerms;
  for(i = 0; i<pQuery->nTerms; i=iNext){
    if( aTerm[i].isNot ){
      /* Handle all NOT terms in a separate pass */
      nNot++;
      iNext = i + aTerm[i].nPhrase+1;
      continue;
    }
    iNext = i + aTerm[i].nPhrase + 1;
    rc = docListOfTerm(v, aTerm[i].iColumn, &aTerm[i], &right);
    if( rc ){
      if( i!=nNot ) dataBufferDestroy(&left);
      queryClear(pQuery);
      return rc;
    }
    while( iNext<pQuery->nTerms && aTerm[iNext].isOr ){
      rc = docListOfTerm(v, aTerm[iNext].iColumn, &aTerm[iNext], &or);
      iNext += aTerm[iNext].nPhrase + 1;
      if( rc ){
        if( i!=nNot ) dataBufferDestroy(&left);
        dataBufferDestroy(&right);
        queryClear(pQuery);
        return rc;
      }
      dataBufferInit(&new, 0);
      docListOrMerge(right.pData, right.nData, or.pData, or.nData, &new);
      dataBufferDestroy(&right);
      dataBufferDestroy(&or);
      right = new;
    }
    if( i==nNot ){           /* first term processed. */
      left = right;
    }else{
      dataBufferInit(&new, 0);
      docListAndMerge(left.pData, left.nData, right.pData, right.nData, &new);
      dataBufferDestroy(&right);
      dataBufferDestroy(&left);
      left = new;
    }
  }

  if( nNot==pQuery->nTerms ){
    /* We do not yet know how to handle a query of only NOT terms */
    return SQLITE_ERROR;
  }

  /* Do the EXCEPT terms */
  for(i=0; i<pQuery->nTerms; i += aTerm[i].nPhrase + 1){
    if( !aTerm[i].isNot ) continue;
    rc = docListOfTerm(v, aTerm[i].iColumn, &aTerm[i], &right);
    if( rc ){
      queryClear(pQuery);
      dataBufferDestroy(&left);
      return rc;
    }
    dataBufferInit(&new, 0);
    docListExceptMerge(left.pData, left.nData, right.pData, right.nData, &new);
    dataBufferDestroy(&right);
    dataBufferDestroy(&left);
    left = new;
  }

  *pResult = left;
  return rc;
}
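
/* To make the merge order above concrete: for the query
** "sqlite OR fts database -broken", the non-NOT pass first computes
** doclist(sqlite) OR doclist(fts), ANDs that with doclist(database),
** and the EXCEPT pass then removes every docid present in
** doclist(broken).
*/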

/*
** This is the xFilter interface for the virtual table.  See
** the virtual table xFilter method documentation for additional
** information.
**
** If idxNum==QUERY_GENERIC then do a full table scan against
** the %_content table.
**
** If idxNum==QUERY_ROWID then do a rowid lookup for a single entry
** in the %_content table.
**
** If idxNum>=QUERY_FULLTEXT then use the full text index.  The
** column on the left-hand side of the MATCH operator is column
** number idxNum-QUERY_FULLTEXT, 0 indexed.  argv[0] is the right-hand
** side of the MATCH operator.
*/
/* TODO(shess) Upgrade the cursor initialization and destruction to
** account for fulltextFilter() being called multiple times on the
** same cursor.  The current solution is very fragile.  Apply fix to
** fts2 as appropriate.
*/
static int fulltextFilter(
  sqlite3_vtab_cursor *pCursor,     /* The cursor used for this query */
  int idxNum, const char *idxStr,   /* Which indexing scheme to use */
  int argc, sqlite3_value **argv    /* Arguments for the indexing scheme */
){
  fulltext_cursor *c = (fulltext_cursor *) pCursor;
  fulltext_vtab *v = cursor_vtab(c);
  int rc;

  TRACE(("FTS2 Filter %p\n",pCursor));

  /* If the cursor has a statement that was not prepared according to
  ** idxNum, clear it.  I believe all calls to fulltextFilter with a
  ** given cursor will have the same idxNum, but in this case it's
  ** easy to be safe.
  */
  if( c->pStmt && c->iCursorType!=idxNum ){
    sqlite3_finalize(c->pStmt);
    c->pStmt = NULL;
  }

  /* Get a fresh statement appropriate to idxNum. */
  /* TODO(shess): Add a prepared-statement cache in the vt structure.
  ** The cache must handle multiple open cursors.  Easier to cache the
  ** statement variants at the vt to reduce malloc/realloc/free here.
  ** Or we could have a StringBuffer variant which allowed stack
  ** construction for small values.
  */
  if( !c->pStmt ){
    char *zSql = sqlite3_mprintf("select rowid, * from %%_content %s",
                                 idxNum==QUERY_GENERIC ? "" : "where rowid=?");
    rc = sql_prepare(v->db, v->zDb, v->zName, &c->pStmt, zSql);
    sqlite3_free(zSql);
    if( rc!=SQLITE_OK ) return rc;
    c->iCursorType = idxNum;
  }else{
    sqlite3_reset(c->pStmt);
    assert( c->iCursorType==idxNum );
  }

  switch( idxNum ){
    case QUERY_GENERIC:
      break;

    case QUERY_ROWID:
      rc = sqlite3_bind_int64(c->pStmt, 1, sqlite3_value_int64(argv[0]));
      if( rc!=SQLITE_OK ) return rc;
      break;

    default:   /* full-text search */
    {
      const char *zQuery = (const char *)sqlite3_value_text(argv[0]);
      assert( idxNum<=QUERY_FULLTEXT+v->nColumn);
      assert( argc==1 );
      queryClear(&c->q);
      if( c->result.nData!=0 ){
        /* This case happens if the same cursor is used repeatedly. */
        dlrDestroy(&c->reader);
        dataBufferReset(&c->result);
      }else{
        dataBufferInit(&c->result, 0);
      }
      rc = fulltextQuery(v, idxNum-QUERY_FULLTEXT, zQuery, -1, &c->result, &c->q);
      if( rc!=SQLITE_OK ) return rc;
      if( c->result.nData!=0 ){
        dlrInit(&c->reader, DL_DOCIDS, c->result.pData, c->result.nData);
      }
      break;
    }
  }

  return fulltextNext(pCursor);
}

/* This is the xEof method of the virtual table.  The SQLite core
** calls this routine to find out if it has reached the end of
** a query's results set.
*/
static int fulltextEof(sqlite3_vtab_cursor *pCursor){
  fulltext_cursor *c = (fulltext_cursor *) pCursor;
  return c->eof;
}

/* This is the xColumn method of the virtual table.  The SQLite
** core calls this method during a query when it needs the value
** of a column from the virtual table.  This method needs to use
** one of the sqlite3_result_*() routines to store the requested
** value back in the pContext.
*/
static int fulltextColumn(sqlite3_vtab_cursor *pCursor,
                          sqlite3_context *pContext, int idxCol){
  fulltext_cursor *c = (fulltext_cursor *) pCursor;
  fulltext_vtab *v = cursor_vtab(c);

  if( idxCol<v->nColumn ){
    sqlite3_value *pVal = sqlite3_column_value(c->pStmt, idxCol+1);
    sqlite3_result_value(pContext, pVal);
  }else if( idxCol==v->nColumn ){
    /* The extra column whose name is the same as the table.
    ** Return a blob which is a pointer to the cursor
    */
    sqlite3_result_blob(pContext, &c, sizeof(c), SQLITE_TRANSIENT);
  }
  return SQLITE_OK;
}

/* This is the xRowid method.  The SQLite core calls this routine to
** retrieve the rowid for the current row of the result set.  The
** rowid should be written to *pRowid.
*/
static int fulltextRowid(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){
  fulltext_cursor *c = (fulltext_cursor *) pCursor;

  *pRowid = sqlite3_column_int64(c->pStmt, 0);
  return SQLITE_OK;
}

/* Add all terms in [zText] to pendingTerms table.  If [iColumn] >= 0,
** we also store positions and offsets in the hash table using that
** column number.
*/
static int buildTerms(fulltext_vtab *v, sqlite_int64 iDocid,
                      const char *zText, int iColumn){
  sqlite3_tokenizer *pTokenizer = v->pTokenizer;
  sqlite3_tokenizer_cursor *pCursor;
  const char *pToken;
  int nTokenBytes;
  int iStartOffset, iEndOffset, iPosition;
  int rc;

  rc = pTokenizer->pModule->xOpen(pTokenizer, zText, -1, &pCursor);
  if( rc!=SQLITE_OK ) return rc;

  pCursor->pTokenizer = pTokenizer;
  while( SQLITE_OK==(rc=pTokenizer->pModule->xNext(pCursor,
                                                   &pToken, &nTokenBytes,
                                                   &iStartOffset, &iEndOffset,
                                                   &iPosition)) ){
    DLCollector *p;
    int nData;                   /* Size of doclist before our update. */

    /* Positions can't be negative; we use -1 as a terminator
     * internally.  Token can't be NULL or empty. */
    if( iPosition<0 || pToken == NULL || nTokenBytes == 0 ){
      rc = SQLITE_ERROR;
      break;
    }

    p = fts2HashFind(&v->pendingTerms, pToken, nTokenBytes);
    if( p==NULL ){
      nData = 0;
      p = dlcNew(iDocid, DL_DEFAULT);
      fts2HashInsert(&v->pendingTerms, pToken, nTokenBytes, p);

      /* Overhead for our hash table entry, the key, and the value. */
      v->nPendingData += sizeof(struct fts2HashElem)+sizeof(*p)+nTokenBytes;
    }else{
      nData = p->b.nData;
      if( p->dlw.iPrevDocid!=iDocid ) dlcNext(p, iDocid);
    }
    if( iColumn>=0 ){
      dlcAddPos(p, iColumn, iPosition, iStartOffset, iEndOffset);
    }

    /* Accumulate data added by dlcNew or dlcNext, and dlcAddPos. */
    v->nPendingData += p->b.nData-nData;
  }

  /* TODO(shess) Check return?  Should this be able to cause errors at
  ** this point?  Actually, same question about sqlite3_finalize(),
  ** though one could argue that failure there means that the data is
  ** not durable.  *ponder*
  */
  pTokenizer->pModule->xClose(pCursor);
  if( SQLITE_DONE == rc ) return SQLITE_OK;
  return rc;
}

/* Add doclists for all terms in [pValues] to pendingTerms table. */
static int insertTerms(fulltext_vtab *v, sqlite_int64 iRowid,
                       sqlite3_value **pValues){
  int i;
  for(i = 0; i < v->nColumn ; ++i){
    char *zText = (char*)sqlite3_value_text(pValues[i]);
    int rc = buildTerms(v, iRowid, zText, i);
    if( rc!=SQLITE_OK ) return rc;
  }
  return SQLITE_OK;
}

/* Add empty doclists for all terms in the given row's content to
** pendingTerms.
*/
static int deleteTerms(fulltext_vtab *v, sqlite_int64 iRowid){
  const char **pValues;
  int i, rc;

  /* TODO(shess) Should we allow such tables at all? */
  if( DL_DEFAULT==DL_DOCIDS ) return SQLITE_ERROR;

  rc = content_select(v, iRowid, &pValues);
  if( rc!=SQLITE_OK ) return rc;

  for(i = 0 ; i < v->nColumn; ++i) {
    rc = buildTerms(v, iRowid, pValues[i], -1);
    if( rc!=SQLITE_OK ) break;
  }

  freeStringArray(v->nColumn, pValues);
  return rc;
}

/* TODO(shess) Refactor the code to remove this forward decl. */
static int initPendingTerms(fulltext_vtab *v, sqlite_int64 iDocid);

/* Insert a row into the %_content table; set *piRowid to be the ID of the
** new row.  Add doclists for terms to pendingTerms.
*/
static int index_insert(fulltext_vtab *v, sqlite3_value *pRequestRowid,
                        sqlite3_value **pValues, sqlite_int64 *piRowid){
  int rc;

  rc = content_insert(v, pRequestRowid, pValues);  /* execute an SQL INSERT */
  if( rc!=SQLITE_OK ) return rc;

  *piRowid = sqlite3_last_insert_rowid(v->db);
  rc = initPendingTerms(v, *piRowid);
  if( rc!=SQLITE_OK ) return rc;

  return insertTerms(v, *piRowid, pValues);
}

/* Delete a row from the %_content table; add empty doclists for terms
** to pendingTerms.
*/
static int index_delete(fulltext_vtab *v, sqlite_int64 iRow){
  int rc = initPendingTerms(v, iRow);
  if( rc!=SQLITE_OK ) return rc;

  rc = deleteTerms(v, iRow);
  if( rc!=SQLITE_OK ) return rc;

  return content_delete(v, iRow);  /* execute an SQL DELETE */
}

/* Update a row in the %_content table; add delete doclists to
** pendingTerms for old terms not in the new data, add insert doclists
** to pendingTerms for terms in the new data.
*/
static int index_update(fulltext_vtab *v, sqlite_int64 iRow,
                        sqlite3_value **pValues){
  int rc = initPendingTerms(v, iRow);
  if( rc!=SQLITE_OK ) return rc;

  /* Generate an empty doclist for each term that previously appeared in this
   * row. */
  rc = deleteTerms(v, iRow);
  if( rc!=SQLITE_OK ) return rc;

  rc = content_update(v, pValues, iRow);  /* execute an SQL UPDATE */
  if( rc!=SQLITE_OK ) return rc;

  /* Now add positions for terms which appear in the updated row. */
  return insertTerms(v, iRow, pValues);
}
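
/* Because segments are never rewritten in place, deletes and updates
** work by shadowing.  An UPDATE of docid 5, say, first buffers an
** empty doclist for each of the row's old terms, then buffers
** positioned doclists for the new terms.  When the buffered data is
** later flushed into a newer segment, the empty doclists mask the
** stale entries in older segments (the merge logic lives elsewhere in
** this file).
*/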

/*******************************************************************/
/* InteriorWriter is used to collect terms and block references into
** interior nodes in %_segments.  See commentary at top of file for
** format.
*/

/* How large interior nodes can grow. */
#define INTERIOR_MAX 2048

/* Minimum number of terms per interior node (except the root). This
** prevents large terms from making the tree too skinny - must be >0
** so that the tree always makes progress.  Note that the min tree
** fanout will be INTERIOR_MIN_TERMS+1.
*/
#define INTERIOR_MIN_TERMS 7
#if INTERIOR_MIN_TERMS<1
# error INTERIOR_MIN_TERMS must be greater than 0.
#endif

/* ROOT_MAX controls how much data is stored inline in the segment
** directory.
*/
/* TODO(shess) Push ROOT_MAX down to whoever is writing things.  It's
** only here so that interiorWriterRootInfo() and leafWriterRootInfo()
** can both see it, but if the caller passed it in, we wouldn't even
** need a define.
*/
#define ROOT_MAX 1024
#if ROOT_MAX<VARINT_MAX*2
# error ROOT_MAX must have enough space for a header.
#endif

/* InteriorBlock stores a linked-list of interior blocks while a lower
** layer is being constructed.
*/
typedef struct InteriorBlock {
  DataBuffer term;           /* Leftmost term in block's subtree. */
  DataBuffer data;           /* Accumulated data for the block. */
  struct InteriorBlock *next;
} InteriorBlock;

static InteriorBlock *interiorBlockNew(int iHeight, sqlite_int64 iChildBlock,
                                       const char *pTerm, int nTerm){
  InteriorBlock *block = sqlite3_malloc(sizeof(InteriorBlock));
  char c[VARINT_MAX+VARINT_MAX];
  int n;

  if( block ){
    memset(block, 0, sizeof(*block));
    dataBufferInit(&block->term, 0);
    dataBufferReplace(&block->term, pTerm, nTerm);

    n = putVarint(c, iHeight);
    n += putVarint(c+n, iChildBlock);
    dataBufferInit(&block->data, INTERIOR_MAX);
    dataBufferReplace(&block->data, c, n);
  }
  return block;
}

#ifndef NDEBUG
/* Verify that the data is readable as an interior node. */
static void interiorBlockValidate(InteriorBlock *pBlock){
  const char *pData = pBlock->data.pData;
  int nData = pBlock->data.nData;
  int n, iDummy;
  sqlite_int64 iBlockid;

  assert( nData>0 );
  assert( pData!=0 );
  assert( pData+nData>pData );

  /* Must lead with height of node as a varint(n), n>0 */
  n = getVarint32(pData, &iDummy);
  assert( n>0 );
  assert( iDummy>0 );
  assert( n<nData );
  pData += n;
  nData -= n;

  /* Must contain iBlockid. */
  n = getVarint(pData, &iBlockid);
  assert( n>0 );
  assert( n<=nData );
  pData += n;
  nData -= n;

  /* Zero or more terms of positive length */
  if( nData!=0 ){
    /* First term is not delta-encoded. */
    n = getVarint32(pData, &iDummy);
    assert( n>0 );
    assert( iDummy>0 );
    assert( n+iDummy>0);
    assert( n+iDummy<=nData );
    pData += n+iDummy;
    nData -= n+iDummy;

    /* Following terms delta-encoded. */
    while( nData!=0 ){
      /* Length of shared prefix. */
      n = getVarint32(pData, &iDummy);
      assert( n>0 );
      assert( iDummy>=0 );
      assert( n<nData );
      pData += n;
      nData -= n;

      /* Length and data of distinct suffix. */
      n = getVarint32(pData, &iDummy);
      assert( n>0 );
      assert( iDummy>0 );
      assert( n+iDummy>0);
      assert( n+iDummy<=nData );
      pData += n+iDummy;
      nData -= n+iDummy;
    }
  }
}
#define ASSERT_VALID_INTERIOR_BLOCK(x) interiorBlockValidate(x)
#else
#define ASSERT_VALID_INTERIOR_BLOCK(x) assert( 1 )
#endif

typedef struct InteriorWriter {
  int iHeight;                     /* from 0 at leaves. */
  InteriorBlock *first, *last;
  struct InteriorWriter *parentWriter;

  DataBuffer term;                 /* Last term written to block "last". */
  sqlite_int64 iOpeningChildBlock; /* First child block in block "last". */
#ifndef NDEBUG
  sqlite_int64 iLastChildBlock;    /* for consistency checks. */
#endif
} InteriorWriter;

/* Initialize an interior node where pTerm[nTerm] marks the leftmost
** term in the tree.  iChildBlock is the leftmost child block at the
** next level down the tree.
*/
static void interiorWriterInit(int iHeight, const char *pTerm, int nTerm,
                               sqlite_int64 iChildBlock,
                               InteriorWriter *pWriter){
  InteriorBlock *block;
  assert( iHeight>0 );
  CLEAR(pWriter);

  pWriter->iHeight = iHeight;
  pWriter->iOpeningChildBlock = iChildBlock;
#ifndef NDEBUG
  pWriter->iLastChildBlock = iChildBlock;
#endif
  block = interiorBlockNew(iHeight, iChildBlock, pTerm, nTerm);
  pWriter->last = pWriter->first = block;
  ASSERT_VALID_INTERIOR_BLOCK(pWriter->last);
  dataBufferInit(&pWriter->term, 0);
}

/* Append the child node rooted at iChildBlock to the interior node,
** with pTerm[nTerm] as the leftmost term in iChildBlock's subtree.
*/
static void interiorWriterAppend(InteriorWriter *pWriter,
                                 const char *pTerm, int nTerm,
                                 sqlite_int64 iChildBlock){
  char c[VARINT_MAX+VARINT_MAX];
  int n, nPrefix = 0;

  ASSERT_VALID_INTERIOR_BLOCK(pWriter->last);

  /* The first term written into an interior node is actually
  ** associated with the second child added (the first child was added
  ** in interiorWriterInit, or in the if clause at the bottom of this
  ** function).  That term gets encoded straight up, with nPrefix left
  ** at 0.
  */
  if( pWriter->term.nData==0 ){
    n = putVarint(c, nTerm);
  }else{
    while( nPrefix<pWriter->term.nData &&
           pTerm[nPrefix]==pWriter->term.pData[nPrefix] ){
      nPrefix++;
    }

    n = putVarint(c, nPrefix);
    n += putVarint(c+n, nTerm-nPrefix);
  }

#ifndef NDEBUG
  pWriter->iLastChildBlock++;
#endif
  assert( pWriter->iLastChildBlock==iChildBlock );

  /* Overflow to a new block if the new term makes the current block
  ** too big, and the current block already has enough terms.
  */
  if( pWriter->last->data.nData+n+nTerm-nPrefix>INTERIOR_MAX &&
      iChildBlock-pWriter->iOpeningChildBlock>INTERIOR_MIN_TERMS ){
    pWriter->last->next = interiorBlockNew(pWriter->iHeight, iChildBlock,
                                           pTerm, nTerm);
    pWriter->last = pWriter->last->next;
    pWriter->iOpeningChildBlock = iChildBlock;
    dataBufferReset(&pWriter->term);
  }else{
    dataBufferAppend2(&pWriter->last->data, c, n,
                      pTerm+nPrefix, nTerm-nPrefix);
    dataBufferReplace(&pWriter->term, pTerm, nTerm);
  }
  ASSERT_VALID_INTERIOR_BLOCK(pWriter->last);
}
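
/* A worked example of the delta encoding above: if the previous term
** is "perform" and the appended term is "performance", the shared
** prefix is 7 bytes, so the entry is encoded as varint(7), varint(4),
** followed by the 4 suffix bytes "ance".
*/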

/* Free the space used by pWriter, including the linked-list of
** InteriorBlocks, and parentWriter, if present.
*/
static int interiorWriterDestroy(InteriorWriter *pWriter){
  InteriorBlock *block = pWriter->first;

  while( block!=NULL ){
    InteriorBlock *b = block;
    block = block->next;
    dataBufferDestroy(&b->term);
    dataBufferDestroy(&b->data);
    sqlite3_free(b);
  }
  if( pWriter->parentWriter!=NULL ){
    interiorWriterDestroy(pWriter->parentWriter);
    sqlite3_free(pWriter->parentWriter);
  }
  dataBufferDestroy(&pWriter->term);
  SCRAMBLE(pWriter);
  return SQLITE_OK;
}

/* If pWriter can fit entirely in ROOT_MAX, return it as the root info
** directly, leaving *piEndBlockid unchanged.  Otherwise, flush
** pWriter to %_segments, building a new layer of interior nodes, and
** recursively ask for their root info.
*/
4275 static int interiorWriterRootInfo(fulltext_vtab *v, InteriorWriter *pWriter, |
|
4276 char **ppRootInfo, int *pnRootInfo, |
|
4277 sqlite_int64 *piEndBlockid){ |
|
4278 InteriorBlock *block = pWriter->first; |
|
4279 sqlite_int64 iBlockid = 0; |
|
4280 int rc; |
|
4281 |
|
4282 /* If we can fit the segment inline */ |
|
4283 if( block==pWriter->last && block->data.nData<ROOT_MAX ){ |
|
4284 *ppRootInfo = block->data.pData; |
|
4285 *pnRootInfo = block->data.nData; |
|
4286 return SQLITE_OK; |
|
4287 } |
|
4288 |
|
4289 /* Flush the first block to %_segments, and create a new level of |
|
4290 ** interior node. |
|
4291 */ |
|
4292 ASSERT_VALID_INTERIOR_BLOCK(block); |
|
4293 rc = block_insert(v, block->data.pData, block->data.nData, &iBlockid); |
|
4294 if( rc!=SQLITE_OK ) return rc; |
|
4295 *piEndBlockid = iBlockid; |
|
4296 |
|
4297 pWriter->parentWriter = sqlite3_malloc(sizeof(*pWriter->parentWriter)); |
|
4298 interiorWriterInit(pWriter->iHeight+1, |
|
4299 block->term.pData, block->term.nData, |
|
4300 iBlockid, pWriter->parentWriter); |
|
4301 |
|
4302 /* Flush additional blocks and append to the higher interior |
|
4303 ** node. |
|
4304 */ |
|
4305 for(block=block->next; block!=NULL; block=block->next){ |
|
4306 ASSERT_VALID_INTERIOR_BLOCK(block); |
|
4307 rc = block_insert(v, block->data.pData, block->data.nData, &iBlockid); |
|
4308 if( rc!=SQLITE_OK ) return rc; |
|
4309 *piEndBlockid = iBlockid; |
|
4310 |
|
4311 interiorWriterAppend(pWriter->parentWriter, |
|
4312 block->term.pData, block->term.nData, iBlockid); |
|
4313 } |
|
4314 |
|
4315 /* Parent node gets the chance to be the root. */ |
|
4316 return interiorWriterRootInfo(v, pWriter->parentWriter, |
|
4317 ppRootInfo, pnRootInfo, piEndBlockid); |
|
4318 } |
|
4319 |
|
4320 /****************************************************************/ |
|
4321 /* InteriorReader is used to read off the data from an interior node |
|
4322 ** (see comment at top of file for the format). |
|
4323 */ |
|
4324 typedef struct InteriorReader { |
|
4325 const char *pData; |
|
4326 int nData; |
|
4327 |
|
4328 DataBuffer term; /* previous term, for decoding term delta. */ |
|
4329 |
|
4330 sqlite_int64 iBlockid; |
|
4331 } InteriorReader; |
|
4332 |
|
4333 static void interiorReaderDestroy(InteriorReader *pReader){ |
|
4334 dataBufferDestroy(&pReader->term); |
|
4335 SCRAMBLE(pReader); |
|
4336 } |
|
4337 |
|
4338 /* TODO(shess) The assertions are great, but what if we're in NDEBUG |
|
4339 ** and the blob is empty or otherwise contains suspect data? |
|
4340 */ |
|
4341 static void interiorReaderInit(const char *pData, int nData, |
|
4342 InteriorReader *pReader){ |
|
4343 int n, nTerm; |
|
4344 |
|
4345 /* Require at least the leading flag byte */ |
|
4346 assert( nData>0 ); |
|
4347 assert( pData[0]!='\0' ); |
|
4348 |
|
4349 CLEAR(pReader); |
|
4350 |
|
4351 /* Decode the base blockid, and set the cursor to the first term. */ |
|
4352 n = getVarint(pData+1, &pReader->iBlockid); |
|
4353 assert( 1+n<=nData ); |
|
4354 pReader->pData = pData+1+n; |
|
4355 pReader->nData = nData-(1+n); |
|
4356 |
|
4357 /* A single-child interior node (such as when a leaf node was too |
|
4358 ** large for the segment directory) won't have any terms. |
|
4359 ** Otherwise, decode the first term. |
|
4360 */ |
|
4361 if( pReader->nData==0 ){ |
|
4362 dataBufferInit(&pReader->term, 0); |
|
4363 }else{ |
|
4364 n = getVarint32(pReader->pData, &nTerm); |
|
4365 dataBufferInit(&pReader->term, nTerm); |
|
4366 dataBufferReplace(&pReader->term, pReader->pData+n, nTerm); |
|
4367 assert( n+nTerm<=pReader->nData ); |
|
4368 pReader->pData += n+nTerm; |
|
4369 pReader->nData -= n+nTerm; |
|
4370 } |
|
4371 } |
|
4372 |
|
4373 static int interiorReaderAtEnd(InteriorReader *pReader){ |
|
4374 return pReader->term.nData==0; |
|
4375 } |
|
4376 |
|
4377 static sqlite_int64 interiorReaderCurrentBlockid(InteriorReader *pReader){ |
|
4378 return pReader->iBlockid; |
|
4379 } |
|
4380 |
|
4381 static int interiorReaderTermBytes(InteriorReader *pReader){ |
|
4382 assert( !interiorReaderAtEnd(pReader) ); |
|
4383 return pReader->term.nData; |
|
4384 } |
|
4385 static const char *interiorReaderTerm(InteriorReader *pReader){ |
|
4386 assert( !interiorReaderAtEnd(pReader) ); |
|
4387 return pReader->term.pData; |
|
4388 } |
|
4389 |
|
4390 /* Step forward to the next term in the node. */ |
|
4391 static void interiorReaderStep(InteriorReader *pReader){ |
|
4392 assert( !interiorReaderAtEnd(pReader) ); |
|
4393 |
|
4394 /* If the last term has been read, signal eof, else construct the |
|
4395 ** next term. |
|
4396 */ |
|
4397 if( pReader->nData==0 ){ |
|
4398 dataBufferReset(&pReader->term); |
|
4399 }else{ |
|
4400 int n, nPrefix, nSuffix; |
|
4401 |
|
4402 n = getVarint32(pReader->pData, &nPrefix); |
|
4403 n += getVarint32(pReader->pData+n, &nSuffix); |
|
4404 |
|
4405 /* Truncate the current term and append suffix data. */ |
|
4406 pReader->term.nData = nPrefix; |
|
4407 dataBufferAppend(&pReader->term, pReader->pData+n, nSuffix); |
|
4408 |
|
4409 assert( n+nSuffix<=pReader->nData ); |
|
4410 pReader->pData += n+nSuffix; |
|
4411 pReader->nData -= n+nSuffix; |
|
4412 } |
|
4413 pReader->iBlockid++; |
|
4414 } |
|
4415 |
|
4416 /* Compare the current term to pTerm[nTerm], returning strcmp-style |
|
4417 ** results. If isPrefix, equality means equal through nTerm bytes. |
|
4418 */ |
|
4419 static int interiorReaderTermCmp(InteriorReader *pReader, |
|
4420 const char *pTerm, int nTerm, int isPrefix){ |
|
4421 const char *pReaderTerm = interiorReaderTerm(pReader); |
|
4422 int nReaderTerm = interiorReaderTermBytes(pReader); |
|
4423 int c, n = nReaderTerm<nTerm ? nReaderTerm : nTerm; |
|
4424 |
|
4425 if( n==0 ){ |
|
4426 if( nReaderTerm>0 ) return -1; |
|
4427 if( nTerm>0 ) return 1; |
|
4428 return 0; |
|
4429 } |
|
4430 |
|
4431 c = memcmp(pReaderTerm, pTerm, n); |
|
4432 if( c!=0 ) return c; |
|
4433 if( isPrefix && n==nTerm ) return 0; |
|
4434 return nReaderTerm - nTerm; |
|
4435 } |
|
4436 |
|
4437 /****************************************************************/ |
|
4438 /* LeafWriter is used to collect terms and associated doclist data |
|
4439 ** into leaf blocks in %_segments (see top of file for format info). |
|
4440 ** Expected usage is: |
|
4441 ** |
|
4442 ** LeafWriter writer; |
|
4443 ** leafWriterInit(0, 0, &writer); |
|
4444 ** while( sorted_terms_left_to_process ){ |
|
4445 ** // data is doclist data for that term. |
|
4446 ** rc = leafWriterStep(v, &writer, pTerm, nTerm, pData, nData); |
|
4447 ** if( rc!=SQLITE_OK ) goto err; |
|
4448 ** } |
|
4449 ** rc = leafWriterFinalize(v, &writer); |
|
4450 **err: |
|
4451 ** leafWriterDestroy(&writer); |
|
4452 ** return rc; |
|
4453 ** |
|
4454 ** leafWriterStep() may write a collected leaf out to %_segments. |
|
4455 ** leafWriterFinalize() finishes writing any buffered data and stores |
|
4456 ** a root node in %_segdir. leafWriterDestroy() frees all buffers and |
|
4457 ** InteriorWriters allocated as part of writing this segment. |
|
4458 ** |
|
4459 ** TODO(shess) Document leafWriterStepMerge(). |
|
4460 */ |
|
4461 |
|
4462 /* Put terms with data this big in their own block. */ |
|
4463 #define STANDALONE_MIN 1024 |
|
4464 |
|
4465 /* Keep leaf blocks below this size. */ |
|
4466 #define LEAF_MAX 2048 |
|
4467 |
|
4468 typedef struct LeafWriter { |
|
4469 int iLevel; |
|
4470 int idx; |
|
4471 sqlite_int64 iStartBlockid; /* needed to create the root info */ |
|
4472 sqlite_int64 iEndBlockid; /* when we're done writing. */ |
|
4473 |
|
4474 DataBuffer term; /* previous encoded term */ |
|
4475 DataBuffer data; /* encoding buffer */ |
|
4476 |
|
4477 /* bytes of first term in the current node which distinguishes that |
|
4478 ** term from the last term of the previous node. |
|
4479 */ |
|
4480 int nTermDistinct; |
|
4481 |
|
4482 InteriorWriter parentWriter; /* if we overflow */ |
|
4483 int has_parent; |
|
4484 } LeafWriter; |
|

static void leafWriterInit(int iLevel, int idx, LeafWriter *pWriter){
  CLEAR(pWriter);
  pWriter->iLevel = iLevel;
  pWriter->idx = idx;

  dataBufferInit(&pWriter->term, 32);

  /* Start out with a reasonably sized block, though it can grow. */
  dataBufferInit(&pWriter->data, LEAF_MAX);
}

#ifndef NDEBUG
/* Verify that the data is readable as a leaf node. */
static void leafNodeValidate(const char *pData, int nData){
  int n, iDummy;

  if( nData==0 ) return;
  assert( nData>0 );
  assert( pData!=0 );
  assert( pData+nData>pData );

  /* Must lead with a varint(0) */
  n = getVarint32(pData, &iDummy);
  assert( iDummy==0 );
  assert( n>0 );
  assert( n<nData );
  pData += n;
  nData -= n;

  /* Leading term length and data must fit in buffer. */
  n = getVarint32(pData, &iDummy);
  assert( n>0 );
  assert( iDummy>0 );
  assert( n+iDummy>0 );
  assert( n+iDummy<nData );
  pData += n+iDummy;
  nData -= n+iDummy;

  /* Leading term's doclist length and data must fit. */
  n = getVarint32(pData, &iDummy);
  assert( n>0 );
  assert( iDummy>0 );
  assert( n+iDummy>0 );
  assert( n+iDummy<=nData );
  ASSERT_VALID_DOCLIST(DL_DEFAULT, pData+n, iDummy, NULL);
  pData += n+iDummy;
  nData -= n+iDummy;

  /* Verify that trailing terms and doclists also are readable. */
  while( nData!=0 ){
    n = getVarint32(pData, &iDummy);
    assert( n>0 );
    assert( iDummy>=0 );
    assert( n<nData );
    pData += n;
    nData -= n;
    n = getVarint32(pData, &iDummy);
    assert( n>0 );
    assert( iDummy>0 );
    assert( n+iDummy>0 );
    assert( n+iDummy<nData );
    pData += n+iDummy;
    nData -= n+iDummy;

    n = getVarint32(pData, &iDummy);
    assert( n>0 );
    assert( iDummy>0 );
    assert( n+iDummy>0 );
    assert( n+iDummy<=nData );
    ASSERT_VALID_DOCLIST(DL_DEFAULT, pData+n, iDummy, NULL);
    pData += n+iDummy;
    nData -= n+iDummy;
  }
}
#define ASSERT_VALID_LEAF_NODE(p, n) leafNodeValidate(p, n)
#else
#define ASSERT_VALID_LEAF_NODE(p, n) assert( 1 )
#endif
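
/* For illustration (example values, not from any actual index): a
** leaf holding the terms "link" and "linux" would lay out as
**
**   varint(0)                 leaf-node flag
**   varint(4) "link"          leading term, encoded whole
**   varint(n1) <doclist 1>    doclist for "link", length n1
**   varint(3) varint(2) "ux"  3 shared prefix bytes, 2 suffix bytes
**   varint(n2) <doclist 2>    doclist for "linux", length n2
**
** which is exactly the shape leafNodeValidate() checks for.
*/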
|

/* Flush the current leaf node to %_segments, adding the resulting
** blockid and the starting term to the interior node which will
** contain it.
*/
static int leafWriterInternalFlush(fulltext_vtab *v, LeafWriter *pWriter,
                                   int iData, int nData){
  sqlite_int64 iBlockid = 0;
  const char *pStartingTerm;
  int nStartingTerm, rc, n;

  /* Must have the leading varint(0) flag, plus at least some
  ** valid-looking data.
  */
  assert( nData>2 );
  assert( iData>=0 );
  assert( iData+nData<=pWriter->data.nData );
  ASSERT_VALID_LEAF_NODE(pWriter->data.pData+iData, nData);

  rc = block_insert(v, pWriter->data.pData+iData, nData, &iBlockid);
  if( rc!=SQLITE_OK ) return rc;
  assert( iBlockid!=0 );

  /* Reconstruct the first term in the leaf for purposes of building
  ** the interior node.
  */
  n = getVarint32(pWriter->data.pData+iData+1, &nStartingTerm);
  pStartingTerm = pWriter->data.pData+iData+1+n;
  assert( pWriter->data.nData>iData+1+n+nStartingTerm );
  assert( pWriter->nTermDistinct>0 );
  assert( pWriter->nTermDistinct<=nStartingTerm );
  nStartingTerm = pWriter->nTermDistinct;

  if( pWriter->has_parent ){
    interiorWriterAppend(&pWriter->parentWriter,
                         pStartingTerm, nStartingTerm, iBlockid);
  }else{
    interiorWriterInit(1, pStartingTerm, nStartingTerm, iBlockid,
                       &pWriter->parentWriter);
    pWriter->has_parent = 1;
  }

  /* Track the span of this segment's leaf nodes. */
  if( pWriter->iEndBlockid==0 ){
    pWriter->iEndBlockid = pWriter->iStartBlockid = iBlockid;
  }else{
    pWriter->iEndBlockid++;
    assert( iBlockid==pWriter->iEndBlockid );
  }

  return SQLITE_OK;
}
|
static int leafWriterFlush(fulltext_vtab *v, LeafWriter *pWriter){
  int rc = leafWriterInternalFlush(v, pWriter, 0, pWriter->data.nData);
  if( rc!=SQLITE_OK ) return rc;

  /* Re-initialize the output buffer. */
  dataBufferReset(&pWriter->data);

  return SQLITE_OK;
}

|
/* Fetch the root info for the segment.  If the entire leaf fits
** within ROOT_MAX, then it will be returned directly, otherwise it
** will be flushed and the root info will be returned from the
** interior node.  *piEndBlockid is set to the blockid of the last
** interior or leaf node written to disk (0 if none are written at
** all).
*/
static int leafWriterRootInfo(fulltext_vtab *v, LeafWriter *pWriter,
                              char **ppRootInfo, int *pnRootInfo,
                              sqlite_int64 *piEndBlockid){
  /* we can fit the segment entirely inline */
  if( !pWriter->has_parent && pWriter->data.nData<ROOT_MAX ){
    *ppRootInfo = pWriter->data.pData;
    *pnRootInfo = pWriter->data.nData;
    *piEndBlockid = 0;
    return SQLITE_OK;
  }

  /* Flush remaining leaf data. */
  if( pWriter->data.nData>0 ){
    int rc = leafWriterFlush(v, pWriter);
    if( rc!=SQLITE_OK ) return rc;
  }

  /* We must have flushed a leaf at some point. */
  assert( pWriter->has_parent );

  /* Tentatively set the end leaf blockid as the end blockid.  If the
  ** interior node can be returned inline, this will be the final
  ** blockid, otherwise it will be overwritten by
  ** interiorWriterRootInfo().
  */
  *piEndBlockid = pWriter->iEndBlockid;

  return interiorWriterRootInfo(v, &pWriter->parentWriter,
                                ppRootInfo, pnRootInfo, piEndBlockid);
}
|

/* Collect the rootInfo data and store it into the segment directory.
** This has the effect of flushing the segment's leaf data to
** %_segments, and also flushing any interior nodes to %_segments.
*/
static int leafWriterFinalize(fulltext_vtab *v, LeafWriter *pWriter){
  sqlite_int64 iEndBlockid;
  char *pRootInfo;
  int rc, nRootInfo;

  rc = leafWriterRootInfo(v, pWriter, &pRootInfo, &nRootInfo, &iEndBlockid);
  if( rc!=SQLITE_OK ) return rc;

  /* Don't bother storing an entirely empty segment. */
  if( iEndBlockid==0 && nRootInfo==0 ) return SQLITE_OK;

  return segdir_set(v, pWriter->iLevel, pWriter->idx,
                    pWriter->iStartBlockid, pWriter->iEndBlockid,
                    iEndBlockid, pRootInfo, nRootInfo);
}

static void leafWriterDestroy(LeafWriter *pWriter){
  if( pWriter->has_parent ) interiorWriterDestroy(&pWriter->parentWriter);
  dataBufferDestroy(&pWriter->term);
  dataBufferDestroy(&pWriter->data);
}

|
/* Encode a term into the leafWriter, delta-encoding as appropriate.
** Returns the length of the new term which distinguishes it from the
** previous term, which can be used to set nTermDistinct when a node
** boundary is crossed.
*/
static int leafWriterEncodeTerm(LeafWriter *pWriter,
                                const char *pTerm, int nTerm){
  char c[VARINT_MAX+VARINT_MAX];
  int n, nPrefix = 0;

  assert( nTerm>0 );
  while( nPrefix<pWriter->term.nData &&
         pTerm[nPrefix]==pWriter->term.pData[nPrefix] ){
    nPrefix++;
    /* Failing this implies that the terms weren't in order. */
    assert( nPrefix<nTerm );
  }

  if( pWriter->data.nData==0 ){
    /* Encode the node header and leading term as:
    **  varint(0)
    **  varint(nTerm)
    **  char pTerm[nTerm]
    */
    n = putVarint(c, '\0');
    n += putVarint(c+n, nTerm);
    dataBufferAppend2(&pWriter->data, c, n, pTerm, nTerm);
  }else{
    /* Delta-encode the term as:
    **  varint(nPrefix)
    **  varint(nSuffix)
    **  char pTermSuffix[nSuffix]
    */
    n = putVarint(c, nPrefix);
    n += putVarint(c+n, nTerm-nPrefix);
    dataBufferAppend2(&pWriter->data, c, n, pTerm+nPrefix, nTerm-nPrefix);
  }
  dataBufferReplace(&pWriter->term, pTerm, nTerm);

  return nPrefix+1;
}
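
/* Worked example (hypothetical terms): if the previous term was
** "apple" and the new term is "apply", the loop above finds
** nPrefix==4, the term is emitted as varint(4) varint(1) "y", and
** the function returns 5, the number of leading bytes of "apply"
** needed to tell it apart from "apple".
*/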
|

/* Used to avoid a memmove when a large amount of doclist data is in
** the buffer.  This constructs a node and term header before
** iDoclistData and flushes the resulting complete node using
** leafWriterInternalFlush().
*/
static int leafWriterInlineFlush(fulltext_vtab *v, LeafWriter *pWriter,
                                 const char *pTerm, int nTerm,
                                 int iDoclistData){
  char c[VARINT_MAX+VARINT_MAX];
  int iData, n = putVarint(c, 0);
  n += putVarint(c+n, nTerm);

  /* There should always be room for the header.  Even if pTerm shared
  ** a substantial prefix with the previous term, the entire prefix
  ** could be constructed from earlier data in the doclist, so there
  ** should be room.
  */
  assert( iDoclistData>=n+nTerm );

  iData = iDoclistData-(n+nTerm);
  memcpy(pWriter->data.pData+iData, c, n);
  memcpy(pWriter->data.pData+iData+n, pTerm, nTerm);

  return leafWriterInternalFlush(v, pWriter, iData, pWriter->data.nData-iData);
}
|

/* Push pTerm[nTerm] along with the doclist data to the leaf layer of
** %_segments.
*/
static int leafWriterStepMerge(fulltext_vtab *v, LeafWriter *pWriter,
                               const char *pTerm, int nTerm,
                               DLReader *pReaders, int nReaders){
  char c[VARINT_MAX+VARINT_MAX];
  int iTermData = pWriter->data.nData, iDoclistData;
  int i, nData, n, nActualData, nActual, rc, nTermDistinct;

  ASSERT_VALID_LEAF_NODE(pWriter->data.pData, pWriter->data.nData);
  nTermDistinct = leafWriterEncodeTerm(pWriter, pTerm, nTerm);

  /* Remember nTermDistinct if opening a new node. */
  if( iTermData==0 ) pWriter->nTermDistinct = nTermDistinct;

  iDoclistData = pWriter->data.nData;

  /* Estimate the length of the merged doclist so we can leave space
  ** to encode it.
  */
  for(i=0, nData=0; i<nReaders; i++){
    nData += dlrAllDataBytes(&pReaders[i]);
  }
  n = putVarint(c, nData);
  dataBufferAppend(&pWriter->data, c, n);

  docListMerge(&pWriter->data, pReaders, nReaders);
  ASSERT_VALID_DOCLIST(DL_DEFAULT,
                       pWriter->data.pData+iDoclistData+n,
                       pWriter->data.nData-iDoclistData-n, NULL);

  /* The actual amount of doclist data at this point could be smaller
  ** than the length we encoded.  Additionally, the space required to
  ** encode this length could be smaller.  For small doclists, this is
  ** not a big deal, we can just use memmove() to adjust things.
  */
  nActualData = pWriter->data.nData-(iDoclistData+n);
  nActual = putVarint(c, nActualData);
  assert( nActualData<=nData );
  assert( nActual<=n );

  /* If the new doclist is big enough to force a standalone leaf
  ** node, we can immediately flush it inline without doing the
  ** memmove().
  */
  /* TODO(shess) This test matches leafWriterStep(), which does this
  ** test before it knows the cost to varint-encode the term and
  ** doclist lengths.  At some point, change to
  ** pWriter->data.nData-iTermData>STANDALONE_MIN.
  */
  if( nTerm+nActualData>STANDALONE_MIN ){
    /* Push leaf node from before this term. */
    if( iTermData>0 ){
      rc = leafWriterInternalFlush(v, pWriter, 0, iTermData);
      if( rc!=SQLITE_OK ) return rc;

      pWriter->nTermDistinct = nTermDistinct;
    }

    /* Fix the encoded doclist length. */
    iDoclistData += n - nActual;
    memcpy(pWriter->data.pData+iDoclistData, c, nActual);

    /* Push the standalone leaf node. */
    rc = leafWriterInlineFlush(v, pWriter, pTerm, nTerm, iDoclistData);
    if( rc!=SQLITE_OK ) return rc;

    /* Leave the node empty. */
    dataBufferReset(&pWriter->data);

    return rc;
  }

  /* At this point, we know that the doclist was small, so do the
  ** memmove if indicated.
  */
  if( nActual<n ){
    memmove(pWriter->data.pData+iDoclistData+nActual,
            pWriter->data.pData+iDoclistData+n,
            pWriter->data.nData-(iDoclistData+n));
    pWriter->data.nData -= n-nActual;
  }

  /* Replace written length with actual length. */
  memcpy(pWriter->data.pData+iDoclistData, c, nActual);

  /* If the node is too large, break things up. */
  /* TODO(shess) This test matches leafWriterStep(), which does this
  ** test before it knows the cost to varint-encode the term and
  ** doclist lengths.  At some point, change to
  ** pWriter->data.nData>LEAF_MAX.
  */
  if( iTermData+nTerm+nActualData>LEAF_MAX ){
    /* Flush out the leading data as a node */
    rc = leafWriterInternalFlush(v, pWriter, 0, iTermData);
    if( rc!=SQLITE_OK ) return rc;

    pWriter->nTermDistinct = nTermDistinct;

    /* Rebuild header using the current term */
    n = putVarint(pWriter->data.pData, 0);
    n += putVarint(pWriter->data.pData+n, nTerm);
    memcpy(pWriter->data.pData+n, pTerm, nTerm);
    n += nTerm;

    /* There should always be room, because the previous encoding
    ** included all data necessary to construct the term.
    */
    assert( n<iDoclistData );
    /* So long as STANDALONE_MIN is half or less of LEAF_MAX, the
    ** following memcpy() is safe (as opposed to needing a memmove).
    */
    assert( 2*STANDALONE_MIN<=LEAF_MAX );
    assert( n+pWriter->data.nData-iDoclistData<iDoclistData );
    memcpy(pWriter->data.pData+n,
           pWriter->data.pData+iDoclistData,
           pWriter->data.nData-iDoclistData);
    pWriter->data.nData -= iDoclistData-n;
  }
  ASSERT_VALID_LEAF_NODE(pWriter->data.pData, pWriter->data.nData);

  return SQLITE_OK;
}
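
/* To summarize the cases above: when the new term plus its merged
** doclist exceed STANDALONE_MIN, any buffered node is flushed and
** the term goes out as its own standalone leaf; otherwise the
** doclist is folded into the buffer (shrinking the over-estimated
** length varint in place), and if that pushes the buffer past
** LEAF_MAX the leading data is flushed and the current term is
** re-encoded as the head of a fresh node.
*/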
|

/* Push pTerm[nTerm] along with the doclist data to the leaf layer of
** %_segments.
*/
/* TODO(shess) Revise writeZeroSegment() so that doclists are
** constructed directly in pWriter->data.
*/
static int leafWriterStep(fulltext_vtab *v, LeafWriter *pWriter,
                          const char *pTerm, int nTerm,
                          const char *pData, int nData){
  int rc;
  DLReader reader;

  dlrInit(&reader, DL_DEFAULT, pData, nData);
  rc = leafWriterStepMerge(v, pWriter, pTerm, nTerm, &reader, 1);
  dlrDestroy(&reader);

  return rc;
}
|


/****************************************************************/
/* LeafReader is used to iterate over an individual leaf node. */
typedef struct LeafReader {
  DataBuffer term;          /* copy of current term. */

  const char *pData;        /* data for current term. */
  int nData;
} LeafReader;

static void leafReaderDestroy(LeafReader *pReader){
  dataBufferDestroy(&pReader->term);
  SCRAMBLE(pReader);
}

static int leafReaderAtEnd(LeafReader *pReader){
  return pReader->nData<=0;
}

/* Access the current term. */
static int leafReaderTermBytes(LeafReader *pReader){
  return pReader->term.nData;
}
static const char *leafReaderTerm(LeafReader *pReader){
  assert( pReader->term.nData>0 );
  return pReader->term.pData;
}

/* Access the doclist data for the current term. */
static int leafReaderDataBytes(LeafReader *pReader){
  int nData;
  assert( pReader->term.nData>0 );
  getVarint32(pReader->pData, &nData);
  return nData;
}
static const char *leafReaderData(LeafReader *pReader){
  int n, nData;
  assert( pReader->term.nData>0 );
  n = getVarint32(pReader->pData, &nData);
  return pReader->pData+n;
}

static void leafReaderInit(const char *pData, int nData,
                           LeafReader *pReader){
  int nTerm, n;

  assert( nData>0 );
  assert( pData[0]=='\0' );

  CLEAR(pReader);

  /* Read the first term, skipping the header byte. */
  n = getVarint32(pData+1, &nTerm);
  dataBufferInit(&pReader->term, nTerm);
  dataBufferReplace(&pReader->term, pData+1+n, nTerm);

  /* Position after the first term. */
  assert( 1+n+nTerm<nData );
  pReader->pData = pData+1+n+nTerm;
  pReader->nData = nData-1-n-nTerm;
}

/* Step the reader forward to the next term. */
static void leafReaderStep(LeafReader *pReader){
  int n, nData, nPrefix, nSuffix;
  assert( !leafReaderAtEnd(pReader) );

  /* Skip previous entry's data block. */
  n = getVarint32(pReader->pData, &nData);
  assert( n+nData<=pReader->nData );
  pReader->pData += n+nData;
  pReader->nData -= n+nData;

  if( !leafReaderAtEnd(pReader) ){
    /* Construct the new term using a prefix from the old term plus a
    ** suffix from the leaf data.
    */
    n = getVarint32(pReader->pData, &nPrefix);
    n += getVarint32(pReader->pData+n, &nSuffix);
    assert( n+nSuffix<pReader->nData );
    pReader->term.nData = nPrefix;
    dataBufferAppend(&pReader->term, pReader->pData+n, nSuffix);

    pReader->pData += n+nSuffix;
    pReader->nData -= n+nSuffix;
  }
}

/* strcmp-style comparison of pReader's current term against pTerm.
** If isPrefix, equality means equal through nTerm bytes.
*/
static int leafReaderTermCmp(LeafReader *pReader,
                             const char *pTerm, int nTerm, int isPrefix){
  int c, n = pReader->term.nData<nTerm ? pReader->term.nData : nTerm;
  if( n==0 ){
    if( pReader->term.nData>0 ) return -1;
    if( nTerm>0 ) return 1;
    return 0;
  }

  c = memcmp(pReader->term.pData, pTerm, n);
  if( c!=0 ) return c;
  if( isPrefix && n==nTerm ) return 0;
  return pReader->term.nData - nTerm;
}
|


/****************************************************************/
/* LeavesReader wraps LeafReader to allow iterating over the entire
** leaf layer of the tree.
*/
typedef struct LeavesReader {
  int idx;                  /* Index within the segment. */

  sqlite3_stmt *pStmt;      /* Statement we're streaming leaves from. */
  int eof;                  /* we've seen SQLITE_DONE from pStmt. */

  LeafReader leafReader;    /* reader for the current leaf. */
  DataBuffer rootData;      /* root data for inline. */
} LeavesReader;

/* Access the current term. */
static int leavesReaderTermBytes(LeavesReader *pReader){
  assert( !pReader->eof );
  return leafReaderTermBytes(&pReader->leafReader);
}
static const char *leavesReaderTerm(LeavesReader *pReader){
  assert( !pReader->eof );
  return leafReaderTerm(&pReader->leafReader);
}

/* Access the doclist data for the current term. */
static int leavesReaderDataBytes(LeavesReader *pReader){
  assert( !pReader->eof );
  return leafReaderDataBytes(&pReader->leafReader);
}
static const char *leavesReaderData(LeavesReader *pReader){
  assert( !pReader->eof );
  return leafReaderData(&pReader->leafReader);
}

static int leavesReaderAtEnd(LeavesReader *pReader){
  return pReader->eof;
}

/* loadSegmentLeaves() may not read all the way to SQLITE_DONE, thus
** leaving the statement handle open, which locks the table.
*/
/* TODO(shess) This "solution" is not satisfactory.  Really, there
** should be a check-in function for all statement handles which
** arranges to call sqlite3_reset().  This most likely will require
** modification to control flow all over the place, though, so for now
** just punt.
**
** Note that the current system assumes that segment merges will run
** to completion, which is why this particular problem hasn't arisen
** in this case.  Probably a brittle assumption.
*/
static int leavesReaderReset(LeavesReader *pReader){
  return sqlite3_reset(pReader->pStmt);
}
|

static void leavesReaderDestroy(LeavesReader *pReader){
  /* If idx is -1, that means we're using a non-cached statement
  ** handle in the optimize() case, so we need to release it.
  */
  if( pReader->pStmt!=NULL && pReader->idx==-1 ){
    sqlite3_finalize(pReader->pStmt);
  }
  leafReaderDestroy(&pReader->leafReader);
  dataBufferDestroy(&pReader->rootData);
  SCRAMBLE(pReader);
}

/* Initialize pReader with the given root data (if iStartBlockid==0
** the leaf data was entirely contained in the root), or from the
** stream of blocks between iStartBlockid and iEndBlockid, inclusive.
*/
static int leavesReaderInit(fulltext_vtab *v,
                            int idx,
                            sqlite_int64 iStartBlockid,
                            sqlite_int64 iEndBlockid,
                            const char *pRootData, int nRootData,
                            LeavesReader *pReader){
  CLEAR(pReader);
  pReader->idx = idx;

  dataBufferInit(&pReader->rootData, 0);
  if( iStartBlockid==0 ){
    /* Entire leaf level fit in root data. */
    dataBufferReplace(&pReader->rootData, pRootData, nRootData);
    leafReaderInit(pReader->rootData.pData, pReader->rootData.nData,
                   &pReader->leafReader);
  }else{
    sqlite3_stmt *s;
    int rc = sql_get_leaf_statement(v, idx, &s);
    if( rc!=SQLITE_OK ) return rc;

    rc = sqlite3_bind_int64(s, 1, iStartBlockid);
    if( rc!=SQLITE_OK ) return rc;

    rc = sqlite3_bind_int64(s, 2, iEndBlockid);
    if( rc!=SQLITE_OK ) return rc;

    rc = sqlite3_step(s);
    if( rc==SQLITE_DONE ){
      pReader->eof = 1;
      return SQLITE_OK;
    }
    if( rc!=SQLITE_ROW ) return rc;

    pReader->pStmt = s;
    leafReaderInit(sqlite3_column_blob(pReader->pStmt, 0),
                   sqlite3_column_bytes(pReader->pStmt, 0),
                   &pReader->leafReader);
  }
  return SQLITE_OK;
}

/* Step the current leaf forward to the next term.  If we reach the
** end of the current leaf, step forward to the next leaf block.
*/
static int leavesReaderStep(fulltext_vtab *v, LeavesReader *pReader){
  assert( !leavesReaderAtEnd(pReader) );
  leafReaderStep(&pReader->leafReader);

  if( leafReaderAtEnd(&pReader->leafReader) ){
    int rc;
    if( pReader->rootData.pData ){
      pReader->eof = 1;
      return SQLITE_OK;
    }
    rc = sqlite3_step(pReader->pStmt);
    if( rc!=SQLITE_ROW ){
      pReader->eof = 1;
      return rc==SQLITE_DONE ? SQLITE_OK : rc;
    }
    leafReaderDestroy(&pReader->leafReader);
    leafReaderInit(sqlite3_column_blob(pReader->pStmt, 0),
                   sqlite3_column_bytes(pReader->pStmt, 0),
                   &pReader->leafReader);
  }
  return SQLITE_OK;
}

/* Order LeavesReaders by their term, ignoring idx.  Readers at eof
** always sort to the end.
*/
static int leavesReaderTermCmp(LeavesReader *lr1, LeavesReader *lr2){
  if( leavesReaderAtEnd(lr1) ){
    if( leavesReaderAtEnd(lr2) ) return 0;
    return 1;
  }
  if( leavesReaderAtEnd(lr2) ) return -1;

  return leafReaderTermCmp(&lr1->leafReader,
                           leavesReaderTerm(lr2), leavesReaderTermBytes(lr2),
                           0);
}

/* Similar to leavesReaderTermCmp(), with additional ordering by idx
** so that older segments sort before newer segments.
*/
static int leavesReaderCmp(LeavesReader *lr1, LeavesReader *lr2){
  int c = leavesReaderTermCmp(lr1, lr2);
  if( c!=0 ) return c;
  return lr1->idx-lr2->idx;
}

/* Assume that pLr[1]..pLr[nLr-1] are sorted.  Bubble pLr[0] into its
** sorted position.
*/
static void leavesReaderReorder(LeavesReader *pLr, int nLr){
  while( nLr>1 && leavesReaderCmp(pLr, pLr+1)>0 ){
    LeavesReader tmp = pLr[0];
    pLr[0] = pLr[1];
    pLr[1] = tmp;
    nLr--;
    pLr++;
  }
}
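
/* For example, if pLr[0] holds "melon" and the sorted tail holds
** ("apple", "kiwi"), the loop above swaps "melon" rightward twice,
** leaving ("apple", "kiwi", "melon").
*/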
|

/* Initializes pReaders with the segments from level iLevel, returning
** the number of segments in *piReaders.  Leaves pReaders in sorted
** order.
*/
static int leavesReadersInit(fulltext_vtab *v, int iLevel,
                             LeavesReader *pReaders, int *piReaders){
  sqlite3_stmt *s;
  int i, rc = sql_get_statement(v, SEGDIR_SELECT_LEVEL_STMT, &s);
  if( rc!=SQLITE_OK ) return rc;

  rc = sqlite3_bind_int(s, 1, iLevel);
  if( rc!=SQLITE_OK ) return rc;

  i = 0;
  while( (rc = sqlite3_step(s))==SQLITE_ROW ){
    sqlite_int64 iStart = sqlite3_column_int64(s, 0);
    sqlite_int64 iEnd = sqlite3_column_int64(s, 1);
    const char *pRootData = sqlite3_column_blob(s, 2);
    int nRootData = sqlite3_column_bytes(s, 2);

    assert( i<MERGE_COUNT );
    rc = leavesReaderInit(v, i, iStart, iEnd, pRootData, nRootData,
                          &pReaders[i]);
    if( rc!=SQLITE_OK ) break;

    i++;
  }
  if( rc!=SQLITE_DONE ){
    while( i-->0 ){
      leavesReaderDestroy(&pReaders[i]);
    }
    return rc;
  }

  *piReaders = i;

  /* Leave our results sorted by term, then age. */
  while( i-- ){
    leavesReaderReorder(pReaders+i, *piReaders-i);
  }
  return SQLITE_OK;
}

/* Merge doclists from pReaders[nReaders] into a single doclist, which
** is written to pWriter.  Assumes pReaders is ordered oldest to
** newest.
*/
/* TODO(shess) Consider putting this inline in segmentMerge(). */
static int leavesReadersMerge(fulltext_vtab *v,
                              LeavesReader *pReaders, int nReaders,
                              LeafWriter *pWriter){
  DLReader dlReaders[MERGE_COUNT];
  const char *pTerm = leavesReaderTerm(pReaders);
  int i, nTerm = leavesReaderTermBytes(pReaders);

  assert( nReaders<=MERGE_COUNT );

  for(i=0; i<nReaders; i++){
    dlrInit(&dlReaders[i], DL_DEFAULT,
            leavesReaderData(pReaders+i),
            leavesReaderDataBytes(pReaders+i));
  }

  return leafWriterStepMerge(v, pWriter, pTerm, nTerm, dlReaders, nReaders);
}

/* Forward ref due to mutual recursion with segdirNextIndex(). */
static int segmentMerge(fulltext_vtab *v, int iLevel);

/* Put the next available index at iLevel into *pidx.  If iLevel
** already has MERGE_COUNT segments, they are merged to a higher
** level to make room.
*/
static int segdirNextIndex(fulltext_vtab *v, int iLevel, int *pidx){
  int rc = segdir_max_index(v, iLevel, pidx);
  if( rc==SQLITE_DONE ){              /* No segments at iLevel. */
    *pidx = 0;
  }else if( rc==SQLITE_ROW ){
    if( *pidx==(MERGE_COUNT-1) ){
      rc = segmentMerge(v, iLevel);
      if( rc!=SQLITE_OK ) return rc;
      *pidx = 0;
    }else{
      (*pidx)++;
    }
  }else{
    return rc;
  }
  return SQLITE_OK;
}
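
/* For instance, assuming MERGE_COUNT is 16 (its usual value): the
** first sixteen level-0 segments receive indices 0..15; the next
** request finds *pidx==15, merges those sixteen segments into a new
** level-1 segment, and hands out index 0 at level 0 again.
*/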
|

/* Merge MERGE_COUNT segments at iLevel into a new segment at
** iLevel+1.  If iLevel+1 is already full of segments, those will be
** merged to make room.
*/
static int segmentMerge(fulltext_vtab *v, int iLevel){
  LeafWriter writer;
  LeavesReader lrs[MERGE_COUNT];
  int i, rc, idx = 0;

  /* Determine the next available segment index at the next level,
  ** merging as necessary.
  */
  rc = segdirNextIndex(v, iLevel+1, &idx);
  if( rc!=SQLITE_OK ) return rc;

  /* TODO(shess) This assumes that we'll always see exactly
  ** MERGE_COUNT segments to merge at a given level.  That will be
  ** broken if we allow the developer to request preemptive or
  ** deferred merging.
  */
  memset(&lrs, '\0', sizeof(lrs));
  rc = leavesReadersInit(v, iLevel, lrs, &i);
  if( rc!=SQLITE_OK ) return rc;
  assert( i==MERGE_COUNT );

  leafWriterInit(iLevel+1, idx, &writer);

  /* Since leavesReaderReorder() pushes readers at eof to the end,
  ** when the first reader is empty, all will be empty.
  */
  while( !leavesReaderAtEnd(lrs) ){
    /* Figure out how many readers share their next term. */
    for(i=1; i<MERGE_COUNT && !leavesReaderAtEnd(lrs+i); i++){
      if( 0!=leavesReaderTermCmp(lrs, lrs+i) ) break;
    }

    rc = leavesReadersMerge(v, lrs, i, &writer);
    if( rc!=SQLITE_OK ) goto err;

    /* Step forward those that were merged. */
    while( i-->0 ){
      rc = leavesReaderStep(v, lrs+i);
      if( rc!=SQLITE_OK ) goto err;

      /* Reorder by term, then by age. */
      leavesReaderReorder(lrs+i, MERGE_COUNT-i);
    }
  }

  for(i=0; i<MERGE_COUNT; i++){
    leavesReaderDestroy(&lrs[i]);
  }

  rc = leafWriterFinalize(v, &writer);
  leafWriterDestroy(&writer);
  if( rc!=SQLITE_OK ) return rc;

  /* Delete the merged segment data. */
  return segdir_delete(v, iLevel);

 err:
  for(i=0; i<MERGE_COUNT; i++){
    leavesReaderDestroy(&lrs[i]);
  }
  leafWriterDestroy(&writer);
  return rc;
}

/* Accumulate the union of *acc and *pData into *acc. */
static void docListAccumulateUnion(DataBuffer *acc,
                                   const char *pData, int nData) {
  DataBuffer tmp = *acc;
  dataBufferInit(acc, tmp.nData+nData);
  docListUnion(tmp.pData, tmp.nData, pData, nData, acc);
  dataBufferDestroy(&tmp);
}

/* TODO(shess) It might be interesting to explore different merge
** strategies, here.  For instance, since this is a sorted merge, we
** could easily merge many doclists in parallel.  With some
** comprehension of the storage format, we could merge all of the
** doclists within a leaf node directly from the leaf node's storage.
** It may be worthwhile to merge smaller doclists before larger
** doclists, since they can be traversed more quickly - but the
** results may have less overlap, making them more expensive in a
** different way.
*/

/* Scan pReader for pTerm/nTerm, and merge the term's doclist over
** *out (any doclists with duplicate docids overwrite those in *out).
** Internal function for loadSegmentLeaf().
*/
static int loadSegmentLeavesInt(fulltext_vtab *v, LeavesReader *pReader,
                                const char *pTerm, int nTerm, int isPrefix,
                                DataBuffer *out){
  /* Doclist data is accumulated into pBuffers similar to the way one
  ** increments a binary counter.  If index 0 is empty, the data is
  ** stored there.  If there is data there, it is merged and the
  ** results carried into position 1, with further merge-and-carry
  ** until an empty position is found.
  */
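  /* Worked example (hypothetical doclists d1..d4): after d1 the
  ** buffers hold [d1]; d2 finds slot 0 full and merges, giving
  ** [empty, d1+d2]; d3 lands in slot 0: [d3, d1+d2]; d4 merges and
  ** carries through both slots: [empty, empty, d1+d2+d3+d4].  Each
  ** buffer therefore holds roughly twice the data of the one before
  ** it.
  */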
|
  DataBuffer *pBuffers = NULL;
  int nBuffers = 0, nMaxBuffers = 0, rc;

  assert( nTerm>0 );

  for(rc=SQLITE_OK; rc==SQLITE_OK && !leavesReaderAtEnd(pReader);
      rc=leavesReaderStep(v, pReader)){
    /* TODO(shess) Really want leavesReaderTermCmp(), but that name is
    ** already taken to compare the terms of two LeavesReaders.  Think
    ** on a better name.  [Meanwhile, break encapsulation rather than
    ** use a confusing name.]
    */
    int c = leafReaderTermCmp(&pReader->leafReader, pTerm, nTerm, isPrefix);
    if( c>0 ) break;      /* Past any possible matches. */
    if( c==0 ){
      const char *pData = leavesReaderData(pReader);
      int iBuffer, nData = leavesReaderDataBytes(pReader);

      /* Find the first empty buffer. */
      for(iBuffer=0; iBuffer<nBuffers; ++iBuffer){
        if( 0==pBuffers[iBuffer].nData ) break;
      }

      /* Out of buffers, add an empty one. */
      if( iBuffer==nBuffers ){
        if( nBuffers==nMaxBuffers ){
          DataBuffer *p;
          nMaxBuffers += 20;

          /* Manual realloc so we can handle NULL appropriately. */
          p = sqlite3_malloc(nMaxBuffers*sizeof(*pBuffers));
          if( p==NULL ){
            rc = SQLITE_NOMEM;
            break;
          }

          if( nBuffers>0 ){
            assert(pBuffers!=NULL);
            memcpy(p, pBuffers, nBuffers*sizeof(*pBuffers));
            sqlite3_free(pBuffers);
          }
          pBuffers = p;
        }
        dataBufferInit(&(pBuffers[nBuffers]), 0);
        nBuffers++;
      }

      /* At this point, must have an empty at iBuffer. */
      assert(iBuffer<nBuffers && pBuffers[iBuffer].nData==0);

      /* If empty was first buffer, no need for merge logic. */
      if( iBuffer==0 ){
        dataBufferReplace(&(pBuffers[0]), pData, nData);
      }else{
        /* pAcc is the empty buffer the merged data will end up in. */
        DataBuffer *pAcc = &(pBuffers[iBuffer]);
        DataBuffer *p = &(pBuffers[0]);

        /* Handle position 0 specially to avoid need to prime pAcc
        ** with pData/nData.
        */
        dataBufferSwap(p, pAcc);
        docListAccumulateUnion(pAcc, pData, nData);

        /* Accumulate remaining doclists into pAcc. */
        for(++p; p<pAcc; ++p){
          docListAccumulateUnion(pAcc, p->pData, p->nData);

          /* dataBufferReset() could allow a large doclist to blow up
          ** our memory requirements.
          */
          if( p->nCapacity<1024 ){
            dataBufferReset(p);
          }else{
            dataBufferDestroy(p);
            dataBufferInit(p, 0);
          }
        }
      }
    }
  }

  /* Union all the doclists together into *out. */
  /* TODO(shess) What if *out is big?  Sigh. */
  if( rc==SQLITE_OK && nBuffers>0 ){
    int iBuffer;
    for(iBuffer=0; iBuffer<nBuffers; ++iBuffer){
      if( pBuffers[iBuffer].nData>0 ){
        if( out->nData==0 ){
          dataBufferSwap(out, &(pBuffers[iBuffer]));
        }else{
          docListAccumulateUnion(out, pBuffers[iBuffer].pData,
                                 pBuffers[iBuffer].nData);
        }
      }
    }
  }

  while( nBuffers-- ){
    dataBufferDestroy(&(pBuffers[nBuffers]));
  }
  if( pBuffers!=NULL ) sqlite3_free(pBuffers);

  return rc;
}
|

/* Call loadSegmentLeavesInt() with pData/nData as input. */
static int loadSegmentLeaf(fulltext_vtab *v, const char *pData, int nData,
                           const char *pTerm, int nTerm, int isPrefix,
                           DataBuffer *out){
  LeavesReader reader;
  int rc;

  assert( nData>1 );
  assert( *pData=='\0' );
  rc = leavesReaderInit(v, 0, 0, 0, pData, nData, &reader);
  if( rc!=SQLITE_OK ) return rc;

  rc = loadSegmentLeavesInt(v, &reader, pTerm, nTerm, isPrefix, out);
  leavesReaderReset(&reader);
  leavesReaderDestroy(&reader);
  return rc;
}

/* Call loadSegmentLeavesInt() with the leaf nodes from iStartLeaf to
** iEndLeaf (inclusive) as input, and merge the resulting doclist into
** out.
*/
static int loadSegmentLeaves(fulltext_vtab *v,
                             sqlite_int64 iStartLeaf, sqlite_int64 iEndLeaf,
                             const char *pTerm, int nTerm, int isPrefix,
                             DataBuffer *out){
  int rc;
  LeavesReader reader;

  assert( iStartLeaf<=iEndLeaf );
  rc = leavesReaderInit(v, 0, iStartLeaf, iEndLeaf, NULL, 0, &reader);
  if( rc!=SQLITE_OK ) return rc;

  rc = loadSegmentLeavesInt(v, &reader, pTerm, nTerm, isPrefix, out);
  leavesReaderReset(&reader);
  leavesReaderDestroy(&reader);
  return rc;
}

/* Taking pData/nData as an interior node, find the sequence of child
** nodes which could include pTerm/nTerm/isPrefix.  Note that the
** interior node terms logically come between the blocks, so there is
** one more blockid than there are terms (that block contains terms >=
** the last interior-node term).
*/
/* TODO(shess) The calling code may already know that the end child is
** not worth calculating, because the end may be in a later sibling
** node.  Consider whether breaking symmetry is worthwhile.  I suspect
** it is not worthwhile.
*/
static void getChildrenContaining(const char *pData, int nData,
                                  const char *pTerm, int nTerm, int isPrefix,
                                  sqlite_int64 *piStartChild,
                                  sqlite_int64 *piEndChild){
  InteriorReader reader;

  assert( nData>1 );
  assert( *pData!='\0' );
  interiorReaderInit(pData, nData, &reader);

  /* Scan for the first child which could contain pTerm/nTerm. */
  while( !interiorReaderAtEnd(&reader) ){
    if( interiorReaderTermCmp(&reader, pTerm, nTerm, 0)>0 ) break;
    interiorReaderStep(&reader);
  }
  *piStartChild = interiorReaderCurrentBlockid(&reader);

  /* Keep scanning to find a term greater than our term, using prefix
  ** comparison if indicated.  If isPrefix is false, this will be the
  ** same blockid as the starting block.
  */
  while( !interiorReaderAtEnd(&reader) ){
    if( interiorReaderTermCmp(&reader, pTerm, nTerm, isPrefix)>0 ) break;
    interiorReaderStep(&reader);
  }
  *piEndChild = interiorReaderCurrentBlockid(&reader);

  interiorReaderDestroy(&reader);

  /* Children must ascend, and if !prefix, both must be the same. */
  assert( *piEndChild>=*piStartChild );
  assert( isPrefix || *piStartChild==*piEndChild );
}
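
/* For example (made-up values), an interior node with terms
** ("gamma", "pi") and child blocks (b1, b2, b3) maps an exact lookup
** for "alpha" to b1 alone, while a prefix search for "g" spans b1
** (terms below "gamma") through b2 (terms from "gamma" up to "pi"),
** since matches may fall on either side of "gamma".
*/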
|

/* Read block at iBlockid and pass it with other params to
** getChildrenContaining().
*/
static int loadAndGetChildrenContaining(
  fulltext_vtab *v,
  sqlite_int64 iBlockid,
  const char *pTerm, int nTerm, int isPrefix,
  sqlite_int64 *piStartChild, sqlite_int64 *piEndChild
){
  sqlite3_stmt *s = NULL;
  int rc;

  assert( iBlockid!=0 );
  assert( pTerm!=NULL );
  assert( nTerm!=0 );        /* TODO(shess) Why not allow this? */
  assert( piStartChild!=NULL );
  assert( piEndChild!=NULL );

  rc = sql_get_statement(v, BLOCK_SELECT_STMT, &s);
  if( rc!=SQLITE_OK ) return rc;

  rc = sqlite3_bind_int64(s, 1, iBlockid);
  if( rc!=SQLITE_OK ) return rc;

  rc = sqlite3_step(s);
  if( rc==SQLITE_DONE ) return SQLITE_ERROR;
  if( rc!=SQLITE_ROW ) return rc;

  getChildrenContaining(sqlite3_column_blob(s, 0), sqlite3_column_bytes(s, 0),
                        pTerm, nTerm, isPrefix, piStartChild, piEndChild);

  /* We expect only one row.  We must execute another sqlite3_step()
   * to complete the iteration; otherwise the table will remain
   * locked. */
  rc = sqlite3_step(s);
  if( rc==SQLITE_ROW ) return SQLITE_ERROR;
  if( rc!=SQLITE_DONE ) return rc;

  return SQLITE_OK;
}
|

/* Traverse the tree represented by pData[nData] looking for
** pTerm[nTerm], placing its doclist into *out.  This is internal to
** loadSegment() to make error-handling cleaner.
*/
static int loadSegmentInt(fulltext_vtab *v, const char *pData, int nData,
                          sqlite_int64 iLeavesEnd,
                          const char *pTerm, int nTerm, int isPrefix,
                          DataBuffer *out){
  /* Special case where root is a leaf. */
  if( *pData=='\0' ){
    return loadSegmentLeaf(v, pData, nData, pTerm, nTerm, isPrefix, out);
  }else{
    int rc;
    sqlite_int64 iStartChild, iEndChild;

    /* Process pData as an interior node, then loop down the tree
    ** until we find the set of leaf nodes to scan for the term.
    */
    getChildrenContaining(pData, nData, pTerm, nTerm, isPrefix,
                          &iStartChild, &iEndChild);
    while( iStartChild>iLeavesEnd ){
      sqlite_int64 iNextStart, iNextEnd;
      rc = loadAndGetChildrenContaining(v, iStartChild, pTerm, nTerm, isPrefix,
                                        &iNextStart, &iNextEnd);
      if( rc!=SQLITE_OK ) return rc;

      /* If we've branched, follow the end branch, too. */
      if( iStartChild!=iEndChild ){
        sqlite_int64 iDummy;
        rc = loadAndGetChildrenContaining(v, iEndChild, pTerm, nTerm, isPrefix,
                                          &iDummy, &iNextEnd);
        if( rc!=SQLITE_OK ) return rc;
      }

      assert( iNextStart<=iNextEnd );
      iStartChild = iNextStart;
      iEndChild = iNextEnd;
    }
    assert( iStartChild<=iLeavesEnd );
    assert( iEndChild<=iLeavesEnd );

    /* Scan through the leaf segments for doclists. */
    return loadSegmentLeaves(v, iStartChild, iEndChild,
                             pTerm, nTerm, isPrefix, out);
  }
}
|

/* Call loadSegmentInt() to collect the doclist for pTerm/nTerm, then
** merge its doclist over *out (any duplicate doclists read from the
** segment rooted at pData will overwrite those in *out).
*/
/* TODO(shess) Consider changing this to determine the depth of the
** leaves using either the first characters of interior nodes (when
** ==1, we're one level above the leaves), or the first character of
** the root (which will describe the height of the tree directly).
** Either feels somewhat tricky to me.
*/
/* TODO(shess) The current merge is likely to be slow for large
** doclists (though it should process from newest/smallest to
** oldest/largest, so it may not be that bad).  It might be useful to
** modify things to allow for N-way merging.  This could either be
** within a segment, with pairwise merges across segments, or across
** all segments at once.
*/
static int loadSegment(fulltext_vtab *v, const char *pData, int nData,
                       sqlite_int64 iLeavesEnd,
                       const char *pTerm, int nTerm, int isPrefix,
                       DataBuffer *out){
  DataBuffer result;
  int rc;

  assert( nData>1 );

  /* This code should never be called with buffered updates. */
  assert( v->nPendingData<0 );

  dataBufferInit(&result, 0);
  rc = loadSegmentInt(v, pData, nData, iLeavesEnd,
                      pTerm, nTerm, isPrefix, &result);
  if( rc==SQLITE_OK && result.nData>0 ){
    if( out->nData==0 ){
      DataBuffer tmp = *out;
      *out = result;
      result = tmp;
    }else{
      DataBuffer merged;
      DLReader readers[2];

      dlrInit(&readers[0], DL_DEFAULT, out->pData, out->nData);
      dlrInit(&readers[1], DL_DEFAULT, result.pData, result.nData);
      dataBufferInit(&merged, out->nData+result.nData);
      docListMerge(&merged, readers, 2);
      dataBufferDestroy(out);
      *out = merged;
      dlrDestroy(&readers[0]);
      dlrDestroy(&readers[1]);
    }
  }
  dataBufferDestroy(&result);
  return rc;
}
|

/* Scan the database and merge together the posting lists for the term
** into *out.
*/
static int termSelect(fulltext_vtab *v, int iColumn,
                      const char *pTerm, int nTerm, int isPrefix,
                      DocListType iType, DataBuffer *out){
  DataBuffer doclist;
  sqlite3_stmt *s;
  int rc = sql_get_statement(v, SEGDIR_SELECT_ALL_STMT, &s);
  if( rc!=SQLITE_OK ) return rc;

  /* This code should never be called with buffered updates. */
  assert( v->nPendingData<0 );

  dataBufferInit(&doclist, 0);

  /* Traverse the segments from oldest to newest so that newer doclist
  ** elements for given docids overwrite older elements.
  */
  while( (rc = sqlite3_step(s))==SQLITE_ROW ){
    const char *pData = sqlite3_column_blob(s, 2);
    const int nData = sqlite3_column_bytes(s, 2);
    const sqlite_int64 iLeavesEnd = sqlite3_column_int64(s, 1);
    rc = loadSegment(v, pData, nData, iLeavesEnd, pTerm, nTerm, isPrefix,
                     &doclist);
    if( rc!=SQLITE_OK ) goto err;
  }
  if( rc==SQLITE_DONE ){
    if( doclist.nData!=0 ){
      /* TODO(shess) The old term_select_all() code applied the column
      ** restrict as we merged segments, leading to smaller buffers.
      ** This is probably worthwhile to bring back, once the new storage
      ** system is checked in.
      */
      if( iColumn==v->nColumn ) iColumn = -1;
      docListTrim(DL_DEFAULT, doclist.pData, doclist.nData,
                  iColumn, iType, out);
    }
    rc = SQLITE_OK;
  }

 err:
  dataBufferDestroy(&doclist);
  return rc;
}
|

/****************************************************************/
/* Used to hold hashtable data for sorting. */
typedef struct TermData {
  const char *pTerm;
  int nTerm;
  DLCollector *pCollector;
} TermData;

/* Orders TermData elements in strcmp fashion ( <0 for less-than, 0
** for equal, >0 for greater-than).
*/
static int termDataCmp(const void *av, const void *bv){
  const TermData *a = (const TermData *)av;
  const TermData *b = (const TermData *)bv;
  int n = a->nTerm<b->nTerm ? a->nTerm : b->nTerm;
  int c = memcmp(a->pTerm, b->pTerm, n);
  if( c!=0 ) return c;
  return a->nTerm-b->nTerm;
}

/* Order pTerms data by term, then write a new level 0 segment using
** LeafWriter.
*/
static int writeZeroSegment(fulltext_vtab *v, fts2Hash *pTerms){
  fts2HashElem *e;
  int idx, rc, i, n;
  TermData *pData;
  LeafWriter writer;
  DataBuffer dl;

  /* Determine the next index at level 0, merging as necessary. */
  rc = segdirNextIndex(v, 0, &idx);
  if( rc!=SQLITE_OK ) return rc;

  n = fts2HashCount(pTerms);
  pData = sqlite3_malloc(n*sizeof(TermData));
  if( pData==NULL ) return SQLITE_NOMEM;  /* guard against OOM before use */

  for(i = 0, e = fts2HashFirst(pTerms); e; i++, e = fts2HashNext(e)){
    assert( i<n );
    pData[i].pTerm = fts2HashKey(e);
    pData[i].nTerm = fts2HashKeysize(e);
    pData[i].pCollector = fts2HashData(e);
  }
  assert( i==n );

  /* TODO(shess) Should we allow user-defined collation sequences,
  ** here?  I think we only need that once we support prefix searches.
  */
  if( n>1 ) qsort(pData, n, sizeof(*pData), termDataCmp);

  /* TODO(shess) Refactor so that we can write directly to the segment
  ** DataBuffer, as happens for segment merges.
  */
  leafWriterInit(0, idx, &writer);
  dataBufferInit(&dl, 0);
  for(i=0; i<n; i++){
    dataBufferReset(&dl);
    dlcAddDoclist(pData[i].pCollector, &dl);
    rc = leafWriterStep(v, &writer,
                        pData[i].pTerm, pData[i].nTerm, dl.pData, dl.nData);
    if( rc!=SQLITE_OK ) goto err;
  }
  rc = leafWriterFinalize(v, &writer);

 err:
  dataBufferDestroy(&dl);
  sqlite3_free(pData);
  leafWriterDestroy(&writer);
  return rc;
}
|

/* If pendingTerms has data, free it. */
static int clearPendingTerms(fulltext_vtab *v){
  if( v->nPendingData>=0 ){
    fts2HashElem *e;
    for(e=fts2HashFirst(&v->pendingTerms); e; e=fts2HashNext(e)){
      dlcDelete(fts2HashData(e));
    }
    fts2HashClear(&v->pendingTerms);
    v->nPendingData = -1;
  }
  return SQLITE_OK;
}

/* If pendingTerms has data, flush it to a level-zero segment, and
** free it.
*/
static int flushPendingTerms(fulltext_vtab *v){
  if( v->nPendingData>=0 ){
    int rc = writeZeroSegment(v, &v->pendingTerms);
    if( rc==SQLITE_OK ) clearPendingTerms(v);
    return rc;
  }
  return SQLITE_OK;
}

/* If pendingTerms is "too big", or docid is out of order, flush it.
** Regardless, be certain that pendingTerms is initialized for use.
*/
static int initPendingTerms(fulltext_vtab *v, sqlite_int64 iDocid){
  /* TODO(shess) Explore whether partially flushing the buffer on
  ** forced-flush would provide better performance.  I suspect that if
  ** we ordered the doclists by size and flushed the largest until the
  ** buffer was half empty, that would let the less frequent terms
  ** generate longer doclists.
  */
  if( iDocid<=v->iPrevDocid || v->nPendingData>kPendingThreshold ){
    int rc = flushPendingTerms(v);
    if( rc!=SQLITE_OK ) return rc;
  }
  if( v->nPendingData<0 ){
    fts2HashInit(&v->pendingTerms, FTS2_HASH_STRING, 1);
    v->nPendingData = 0;
  }
  v->iPrevDocid = iDocid;
  return SQLITE_OK;
}
|

/* This function implements the xUpdate callback; it is the top-level entry
 * point for inserting, deleting or updating a row in a full-text table. */
static int fulltextUpdate(sqlite3_vtab *pVtab, int nArg, sqlite3_value **ppArg,
                          sqlite_int64 *pRowid){
  fulltext_vtab *v = (fulltext_vtab *) pVtab;
  int rc;

  TRACE(("FTS2 Update %p\n", pVtab));

  if( nArg<2 ){
    rc = index_delete(v, sqlite3_value_int64(ppArg[0]));
    if( rc==SQLITE_OK ){
      /* If we just deleted the last row in the table, clear out the
      ** index data.
      */
      rc = content_exists(v);
      if( rc==SQLITE_ROW ){
        rc = SQLITE_OK;
      }else if( rc==SQLITE_DONE ){
        /* Clear the pending terms so we don't flush a useless level-0
        ** segment when the transaction closes.
        */
        rc = clearPendingTerms(v);
        if( rc==SQLITE_OK ){
          rc = segdir_delete_all(v);
        }
      }
    }
  } else if( sqlite3_value_type(ppArg[0]) != SQLITE_NULL ){
    /* An update:
     * ppArg[0] = old rowid
     * ppArg[1] = new rowid
     * ppArg[2..2+v->nColumn-1] = values
     * ppArg[2+v->nColumn] = value for magic column (we ignore this)
     */
    sqlite_int64 rowid = sqlite3_value_int64(ppArg[0]);
    if( sqlite3_value_type(ppArg[1]) != SQLITE_INTEGER ||
        sqlite3_value_int64(ppArg[1]) != rowid ){
      rc = SQLITE_ERROR;  /* we don't allow changing the rowid */
    } else {
      assert( nArg==2+v->nColumn+1);
      rc = index_update(v, rowid, &ppArg[2]);
    }
  } else {
    /* An insert:
     * ppArg[1] = requested rowid
     * ppArg[2..2+v->nColumn-1] = values
     * ppArg[2+v->nColumn] = value for magic column (we ignore this)
     */
    assert( nArg==2+v->nColumn+1);
    rc = index_insert(v, ppArg[1], &ppArg[2], pRowid);
  }

  return rc;
}
|

static int fulltextSync(sqlite3_vtab *pVtab){
  TRACE(("FTS2 xSync()\n"));
  return flushPendingTerms((fulltext_vtab *)pVtab);
}

static int fulltextBegin(sqlite3_vtab *pVtab){
  fulltext_vtab *v = (fulltext_vtab *) pVtab;
  TRACE(("FTS2 xBegin()\n"));

  /* Any buffered updates should have been cleared by the previous
  ** transaction.
  */
  assert( v->nPendingData<0 );
  return clearPendingTerms(v);
}

static int fulltextCommit(sqlite3_vtab *pVtab){
  fulltext_vtab *v = (fulltext_vtab *) pVtab;
  TRACE(("FTS2 xCommit()\n"));

  /* Buffered updates should have been cleared by fulltextSync(). */
  assert( v->nPendingData<0 );
  return clearPendingTerms(v);
}

static int fulltextRollback(sqlite3_vtab *pVtab){
  TRACE(("FTS2 xRollback()\n"));
  return clearPendingTerms((fulltext_vtab *)pVtab);
}
|

/*
** Implementation of the snippet() function for FTS2
*/
static void snippetFunc(
  sqlite3_context *pContext,
  int argc,
  sqlite3_value **argv
){
  fulltext_cursor *pCursor;
  if( argc<1 ) return;
  if( sqlite3_value_type(argv[0])!=SQLITE_BLOB ||
      sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){
    sqlite3_result_error(pContext, "illegal first argument to html_snippet",-1);
  }else{
    const char *zStart = "<b>";
    const char *zEnd = "</b>";
    const char *zEllipsis = "<b>...</b>";
    memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor));
    if( argc>=2 ){
      zStart = (const char*)sqlite3_value_text(argv[1]);
      if( argc>=3 ){
        zEnd = (const char*)sqlite3_value_text(argv[2]);
        if( argc>=4 ){
          zEllipsis = (const char*)sqlite3_value_text(argv[3]);
        }
      }
    }
    snippetAllOffsets(pCursor);
    snippetText(pCursor, zStart, zEnd, zEllipsis);
    sqlite3_result_text(pContext, pCursor->snippet.zSnippet,
                        pCursor->snippet.nSnippet, SQLITE_STATIC);
  }
}

/*
** Implementation of the offsets() function for FTS2
*/
static void snippetOffsetsFunc(
  sqlite3_context *pContext,
  int argc,
  sqlite3_value **argv
){
  fulltext_cursor *pCursor;
  if( argc<1 ) return;
  if( sqlite3_value_type(argv[0])!=SQLITE_BLOB ||
      sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){
    sqlite3_result_error(pContext, "illegal first argument to offsets",-1);
  }else{
    memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor));
    snippetAllOffsets(pCursor);
    snippetOffsetText(&pCursor->snippet);
    sqlite3_result_text(pContext,
                        pCursor->snippet.zOffset, pCursor->snippet.nOffset,
                        SQLITE_STATIC);
  }
}
|
/* OptLeavesReader is nearly identical to LeavesReader, except that
** where LeavesReader is geared towards the merging of complete
** segment levels (with exactly MERGE_COUNT segments), OptLeavesReader
** is geared towards implementation of the optimize() function, and
** can merge all segments simultaneously.  This version may be
** somewhat less efficient than LeavesReader because it merges into an
** accumulator rather than doing an N-way merge, but since segment
** size grows exponentially (so segment count grows logarithmically)
** this is probably not an immediate problem.
*/
/* TODO(shess): Prove that assertion, or extend the merge code to
** merge tree fashion (like the prefix-searching code does).
*/
/* TODO(shess): OptLeavesReader and LeavesReader could probably be
** merged with little or no loss of performance for LeavesReader.  The
** merged code would need to handle >MERGE_COUNT segments, and would
** also need to be able to optionally optimize away deletes.
*/
typedef struct OptLeavesReader {
  /* Segment number, to order readers by age. */
  int segment;
  LeavesReader reader;
} OptLeavesReader;

static int optLeavesReaderAtEnd(OptLeavesReader *pReader){
  return leavesReaderAtEnd(&pReader->reader);
}
static int optLeavesReaderTermBytes(OptLeavesReader *pReader){
  return leavesReaderTermBytes(&pReader->reader);
}
static const char *optLeavesReaderData(OptLeavesReader *pReader){
  return leavesReaderData(&pReader->reader);
}
static int optLeavesReaderDataBytes(OptLeavesReader *pReader){
  return leavesReaderDataBytes(&pReader->reader);
}
static const char *optLeavesReaderTerm(OptLeavesReader *pReader){
  return leavesReaderTerm(&pReader->reader);
}
static int optLeavesReaderStep(fulltext_vtab *v, OptLeavesReader *pReader){
  return leavesReaderStep(v, &pReader->reader);
}
static int optLeavesReaderTermCmp(OptLeavesReader *lr1, OptLeavesReader *lr2){
  return leavesReaderTermCmp(&lr1->reader, &lr2->reader);
}
/* Order by term ascending, segment ascending (oldest to newest), with
** exhausted readers to the end.
*/
static int optLeavesReaderCmp(OptLeavesReader *lr1, OptLeavesReader *lr2){
  int c = optLeavesReaderTermCmp(lr1, lr2);
  if( c!=0 ) return c;
  return lr1->segment-lr2->segment;
}
/* Bubble pLr[0] to the appropriate place in pLr[1..nLr-1].  Assumes
** that pLr[1..nLr-1] is already sorted.
*/
static void optLeavesReaderReorder(OptLeavesReader *pLr, int nLr){
  while( nLr>1 && optLeavesReaderCmp(pLr, pLr+1)>0 ){
    OptLeavesReader tmp = pLr[0];
    pLr[0] = pLr[1];
    pLr[1] = tmp;
    nLr--;
    pLr++;
  }
}
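
/* Illustrative trace (hypothetical terms): with readers positioned at
** terms [c, a, b], where the tail [a, b] is already sorted, the loop
** swaps c past a and then past b, yielding [a, b, c] while preserving
** the relative order of the sorted tail.  Because exhausted readers
** compare greater than everything (per optLeavesReaderCmp above),
** they collect at the end of the array.
*/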
|
/* optimize() helper function.  Put the readers in order and iterate
** through them, merging doclists for matching terms into pWriter.
** Returns SQLITE_OK on success, or the SQLite error code which
** prevented success.
*/
static int optimizeInternal(fulltext_vtab *v,
                            OptLeavesReader *readers, int nReaders,
                            LeafWriter *pWriter){
  int i, rc = SQLITE_OK;
  DataBuffer doclist, merged, tmp;

  /* Order the readers. */
  i = nReaders;
  while( i-- > 0 ){
    optLeavesReaderReorder(&readers[i], nReaders-i);
  }

  dataBufferInit(&doclist, LEAF_MAX);
  dataBufferInit(&merged, LEAF_MAX);

  /* Exhausted readers bubble to the end, so when the first reader is
  ** at eof, all are at eof.
  */
  while( !optLeavesReaderAtEnd(&readers[0]) ){

    /* Figure out how many readers share the next term. */
    for(i=1; i<nReaders && !optLeavesReaderAtEnd(&readers[i]); i++){
      if( 0!=optLeavesReaderTermCmp(&readers[0], &readers[i]) ) break;
    }

    /* Special-case for no merge. */
    if( i==1 ){
      /* Trim deletions from the doclist. */
      dataBufferReset(&merged);
      docListTrim(DL_DEFAULT,
                  optLeavesReaderData(&readers[0]),
                  optLeavesReaderDataBytes(&readers[0]),
                  -1, DL_DEFAULT, &merged);
    }else{
      DLReader dlReaders[MERGE_COUNT];
      int iReader, nReaders;

      /* Prime the pipeline with the first reader's doclist.  After
      ** one pass index 0 will reference the accumulated doclist.
      */
      dlrInit(&dlReaders[0], DL_DEFAULT,
              optLeavesReaderData(&readers[0]),
              optLeavesReaderDataBytes(&readers[0]));
      iReader = 1;

      assert( iReader<i );  /* Must execute the loop at least once. */
      while( iReader<i ){
        /* Merge up to MERGE_COUNT inputs per pass. */
        for( nReaders=1; iReader<i && nReaders<MERGE_COUNT;
             iReader++, nReaders++ ){
          dlrInit(&dlReaders[nReaders], DL_DEFAULT,
                  optLeavesReaderData(&readers[iReader]),
                  optLeavesReaderDataBytes(&readers[iReader]));
        }

        /* Merge doclists and swap result into accumulator. */
        dataBufferReset(&merged);
        docListMerge(&merged, dlReaders, nReaders);
        tmp = merged;
        merged = doclist;
        doclist = tmp;

        while( nReaders-- > 0 ){
          dlrDestroy(&dlReaders[nReaders]);
        }

        /* Accumulated doclist to reader 0 for next pass. */
        dlrInit(&dlReaders[0], DL_DEFAULT, doclist.pData, doclist.nData);
      }

      /* Destroy reader that was left in the pipeline. */
      dlrDestroy(&dlReaders[0]);

      /* Trim deletions from the doclist. */
      dataBufferReset(&merged);
      docListTrim(DL_DEFAULT, doclist.pData, doclist.nData,
                  -1, DL_DEFAULT, &merged);
    }

    /* Only pass doclists with hits (skip if all hits deleted). */
    if( merged.nData>0 ){
      rc = leafWriterStep(v, pWriter,
                          optLeavesReaderTerm(&readers[0]),
                          optLeavesReaderTermBytes(&readers[0]),
                          merged.pData, merged.nData);
      if( rc!=SQLITE_OK ) goto err;
    }

    /* Step merged readers to next term and reorder. */
    while( i-- > 0 ){
      rc = optLeavesReaderStep(v, &readers[i]);
      if( rc!=SQLITE_OK ) goto err;

      optLeavesReaderReorder(&readers[i], nReaders-i);
    }
  }

 err:
  dataBufferDestroy(&doclist);
  dataBufferDestroy(&merged);
  return rc;
}
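
/* Illustrative trace: if 40 readers share a term and MERGE_COUNT is
** 16, the first pass merges reader 0's doclist with the next 15; the
** accumulated result becomes input 0 of the second pass, which folds
** in 15 more; a final pass folds in the remaining 9.  Each pass is a
** true N-way docListMerge(), but passes are chained serially through
** the accumulator, which is the inefficiency noted in the
** OptLeavesReader comment above.
*/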
|
/* Implement optimize() function for FTS2.  optimize(t) merges all
** segments in the fts index into a single segment.  't' is the magic
** table-named column.
*/
static void optimizeFunc(sqlite3_context *pContext,
                         int argc, sqlite3_value **argv){
  fulltext_cursor *pCursor;
  if( argc!=1 ){
    sqlite3_result_error(pContext, "wrong number of arguments to optimize()",-1);
  }else if( sqlite3_value_type(argv[0])!=SQLITE_BLOB ||
            sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){
    sqlite3_result_error(pContext, "illegal first argument to optimize",-1);
  }else{
    fulltext_vtab *v;
    int i, rc, iMaxLevel;
    OptLeavesReader *readers;
    int nReaders;
    LeafWriter writer;
    sqlite3_stmt *s;

    memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor));
    v = cursor_vtab(pCursor);

    /* Flush any buffered updates before optimizing. */
    rc = flushPendingTerms(v);
    if( rc!=SQLITE_OK ) goto err;

    rc = segdir_count(v, &nReaders, &iMaxLevel);
    if( rc!=SQLITE_OK ) goto err;
    if( nReaders==0 || nReaders==1 ){
      sqlite3_result_text(pContext, "Index already optimal", -1,
                          SQLITE_STATIC);
      return;
    }

    rc = sql_get_statement(v, SEGDIR_SELECT_ALL_STMT, &s);
    if( rc!=SQLITE_OK ) goto err;

    readers = sqlite3_malloc(nReaders*sizeof(readers[0]));
    if( readers==NULL ) goto err;

    /* Note that there will already be a segment at this position
    ** until we call segdir_delete() on iMaxLevel.
    */
    leafWriterInit(iMaxLevel, 0, &writer);

    i = 0;
    while( (rc = sqlite3_step(s))==SQLITE_ROW ){
      sqlite_int64 iStart = sqlite3_column_int64(s, 0);
      sqlite_int64 iEnd = sqlite3_column_int64(s, 1);
      const char *pRootData = sqlite3_column_blob(s, 2);
      int nRootData = sqlite3_column_bytes(s, 2);

      assert( i<nReaders );
      rc = leavesReaderInit(v, -1, iStart, iEnd, pRootData, nRootData,
                            &readers[i].reader);
      if( rc!=SQLITE_OK ) break;

      readers[i].segment = i;
      i++;
    }

    /* If we managed to successfully read them all, optimize them. */
    if( rc==SQLITE_DONE ){
      assert( i==nReaders );
      rc = optimizeInternal(v, readers, nReaders, &writer);
    }

    while( i-- > 0 ){
      leavesReaderDestroy(&readers[i].reader);
    }
    sqlite3_free(readers);

    /* If we've successfully gotten to here, delete the old segments
    ** and flush the interior structure of the new segment.
    */
    if( rc==SQLITE_OK ){
      for( i=0; i<=iMaxLevel; i++ ){
        rc = segdir_delete(v, i);
        if( rc!=SQLITE_OK ) break;
      }

      if( rc==SQLITE_OK ) rc = leafWriterFinalize(v, &writer);
    }

    leafWriterDestroy(&writer);

    if( rc!=SQLITE_OK ) goto err;

    sqlite3_result_text(pContext, "Index optimized", -1, SQLITE_STATIC);
    return;

    /* TODO(shess): Error-handling needs to be improved along the
    ** lines of the dump_ functions.
    */
 err:
    {
      char buf[512];
      sqlite3_snprintf(sizeof(buf), buf, "Error in optimize: %s",
                       sqlite3_errmsg(sqlite3_context_db_handle(pContext)));
      sqlite3_result_error(pContext, buf, -1);
    }
  }
}
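
/* Example usage (illustrative; the table is hypothetical):
**
**   SELECT optimize(t) FROM t LIMIT 1;
**
** returns 'Index optimized' (or 'Index already optimal' when the
** index is already a single segment).  The LIMIT 1 matters: the
** function runs once per visited row, and one invocation is enough.
*/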
|
#ifdef SQLITE_TEST
/* Generate an error of the form "<prefix>: <msg>".  If msg is NULL,
** pull the error from the context's db handle.
*/
static void generateError(sqlite3_context *pContext,
                          const char *prefix, const char *msg){
  char buf[512];
  if( msg==NULL ) msg = sqlite3_errmsg(sqlite3_context_db_handle(pContext));
  sqlite3_snprintf(sizeof(buf), buf, "%s: %s", prefix, msg);
  sqlite3_result_error(pContext, buf, -1);
}

/* Helper function to collect the set of terms in the segment into
** pTerms.  The segment is defined by the leaf nodes between
** iStartBlockid and iEndBlockid, inclusive, or by the contents of
** pRootData if iStartBlockid is 0 (in which case the entire segment
** fit in a leaf).
*/
static int collectSegmentTerms(fulltext_vtab *v, sqlite3_stmt *s,
                               fts2Hash *pTerms){
  const sqlite_int64 iStartBlockid = sqlite3_column_int64(s, 0);
  const sqlite_int64 iEndBlockid = sqlite3_column_int64(s, 1);
  const char *pRootData = sqlite3_column_blob(s, 2);
  const int nRootData = sqlite3_column_bytes(s, 2);
  LeavesReader reader;
  int rc = leavesReaderInit(v, 0, iStartBlockid, iEndBlockid,
                            pRootData, nRootData, &reader);
  if( rc!=SQLITE_OK ) return rc;

  while( rc==SQLITE_OK && !leavesReaderAtEnd(&reader) ){
    const char *pTerm = leavesReaderTerm(&reader);
    const int nTerm = leavesReaderTermBytes(&reader);
    void *oldValue = sqlite3Fts2HashFind(pTerms, pTerm, nTerm);
    void *newValue = (void *)((char *)oldValue+1);

    /* From the comment before sqlite3Fts2HashInsert in fts2_hash.c,
    ** the data value passed is returned in case of malloc failure.
    */
    if( newValue==sqlite3Fts2HashInsert(pTerms, pTerm, nTerm, newValue) ){
      rc = SQLITE_NOMEM;
    }else{
      rc = leavesReaderStep(v, &reader);
    }
  }

  leavesReaderDestroy(&reader);
  return rc;
}

/* Helper function to build the result string for dump_terms(). */
static int generateTermsResult(sqlite3_context *pContext, fts2Hash *pTerms){
  int iTerm, nTerms, nResultBytes, iByte;
  char *result;
  TermData *pData;
  fts2HashElem *e;

  /* Iterate pTerms to generate an array of terms in pData for
  ** sorting.
  */
  nTerms = fts2HashCount(pTerms);
  assert( nTerms>0 );
  pData = sqlite3_malloc(nTerms*sizeof(TermData));
  if( pData==NULL ) return SQLITE_NOMEM;

  nResultBytes = 0;
  for(iTerm = 0, e = fts2HashFirst(pTerms); e; iTerm++, e = fts2HashNext(e)){
    nResultBytes += fts2HashKeysize(e)+1;  /* Term plus trailing space */
    assert( iTerm<nTerms );
    pData[iTerm].pTerm = fts2HashKey(e);
    pData[iTerm].nTerm = fts2HashKeysize(e);
    pData[iTerm].pCollector = fts2HashData(e);  /* unused */
  }
  assert( iTerm==nTerms );

  assert( nResultBytes>0 );  /* nTerms>0, so nResultBytes must be, too. */
  result = sqlite3_malloc(nResultBytes);
  if( result==NULL ){
    sqlite3_free(pData);
    return SQLITE_NOMEM;
  }

  if( nTerms>1 ) qsort(pData, nTerms, sizeof(*pData), termDataCmp);

  /* Read the terms in order to build the result. */
  iByte = 0;
  for(iTerm=0; iTerm<nTerms; ++iTerm){
    memcpy(result+iByte, pData[iTerm].pTerm, pData[iTerm].nTerm);
    iByte += pData[iTerm].nTerm;
    result[iByte++] = ' ';
  }
  assert( iByte==nResultBytes );
  assert( result[nResultBytes-1]==' ' );
  result[nResultBytes-1] = '\0';

  /* Passes ownership of result to pContext. */
  sqlite3_result_text(pContext, result, nResultBytes-1, sqlite3_free);
  sqlite3_free(pData);
  return SQLITE_OK;
}

/* Implements dump_terms() for use in inspecting the fts2 index from
** tests.  TEXT result containing the ordered list of terms joined by
** spaces.  dump_terms(t, level, idx) dumps the terms for the segment
** specified by level, idx (in %_segdir), while dump_terms(t) dumps
** all terms in the index.  In both cases t is the fts table's magic
** table-named column.
*/
static void dumpTermsFunc(
  sqlite3_context *pContext,
  int argc, sqlite3_value **argv
){
  fulltext_cursor *pCursor;
  if( argc!=3 && argc!=1 ){
    generateError(pContext, "dump_terms", "incorrect arguments");
  }else if( sqlite3_value_type(argv[0])!=SQLITE_BLOB ||
            sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){
    generateError(pContext, "dump_terms", "illegal first argument");
  }else{
    fulltext_vtab *v;
    fts2Hash terms;
    sqlite3_stmt *s = NULL;
    int rc;

    memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor));
    v = cursor_vtab(pCursor);

    /* If passed only the cursor column, get all segments.  Otherwise
    ** get the segment described by the following two arguments.
    */
    if( argc==1 ){
      rc = sql_get_statement(v, SEGDIR_SELECT_ALL_STMT, &s);
    }else{
      rc = sql_get_statement(v, SEGDIR_SELECT_SEGMENT_STMT, &s);
      if( rc==SQLITE_OK ){
        rc = sqlite3_bind_int(s, 1, sqlite3_value_int(argv[1]));
        if( rc==SQLITE_OK ){
          rc = sqlite3_bind_int(s, 2, sqlite3_value_int(argv[2]));
        }
      }
    }

    if( rc!=SQLITE_OK ){
      generateError(pContext, "dump_terms", NULL);
      return;
    }

    /* Collect the terms for each segment. */
    sqlite3Fts2HashInit(&terms, FTS2_HASH_STRING, 1);
    while( (rc = sqlite3_step(s))==SQLITE_ROW ){
      rc = collectSegmentTerms(v, s, &terms);
      if( rc!=SQLITE_OK ) break;
    }

    if( rc!=SQLITE_DONE ){
      sqlite3_reset(s);
      generateError(pContext, "dump_terms", NULL);
    }else{
      const int nTerms = fts2HashCount(&terms);
      if( nTerms>0 ){
        rc = generateTermsResult(pContext, &terms);
        if( rc==SQLITE_NOMEM ){
          generateError(pContext, "dump_terms", "out of memory");
        }else{
          assert( rc==SQLITE_OK );
        }
      }else if( argc==3 ){
        /* The specific segment asked for could not be found. */
        generateError(pContext, "dump_terms", "segment not found");
      }else{
        /* No segments found. */
        /* TODO(shess): It should be impossible to reach this.  This
        ** case can only happen for an empty table, in which case
        ** SQLite has no rows to call this function on.
        */
        sqlite3_result_null(pContext);
      }
    }
    sqlite3Fts2HashClear(&terms);
  }
}
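
/* Example usage in tests (illustrative; the table is hypothetical):
**
**   SELECT dump_terms(t) FROM t LIMIT 1;        -- all terms
**   SELECT dump_terms(t, 0, 0) FROM t LIMIT 1;  -- one segment
**
** The second form names a segment by its level and idx in %_segdir.
*/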
|
/* Expand the DL_DEFAULT doclist in pData into a text result in
** pContext.
*/
static void createDoclistResult(sqlite3_context *pContext,
                                const char *pData, int nData){
  DataBuffer dump;
  DLReader dlReader;

  assert( pData!=NULL && nData>0 );

  dataBufferInit(&dump, 0);
  dlrInit(&dlReader, DL_DEFAULT, pData, nData);
  for( ; !dlrAtEnd(&dlReader); dlrStep(&dlReader) ){
    char buf[256];
    PLReader plReader;

    plrInit(&plReader, &dlReader);
    if( DL_DEFAULT==DL_DOCIDS || plrAtEnd(&plReader) ){
      sqlite3_snprintf(sizeof(buf), buf, "[%lld] ", dlrDocid(&dlReader));
      dataBufferAppend(&dump, buf, strlen(buf));
    }else{
      int iColumn = plrColumn(&plReader);

      sqlite3_snprintf(sizeof(buf), buf, "[%lld %d[",
                       dlrDocid(&dlReader), iColumn);
      dataBufferAppend(&dump, buf, strlen(buf));

      for( ; !plrAtEnd(&plReader); plrStep(&plReader) ){
        if( plrColumn(&plReader)!=iColumn ){
          iColumn = plrColumn(&plReader);
          sqlite3_snprintf(sizeof(buf), buf, "] %d[", iColumn);
          assert( dump.nData>0 );
          dump.nData--;  /* Overwrite trailing space. */
          assert( dump.pData[dump.nData]==' ' );
          dataBufferAppend(&dump, buf, strlen(buf));
        }
        if( DL_DEFAULT==DL_POSITIONS_OFFSETS ){
          sqlite3_snprintf(sizeof(buf), buf, "%d,%d,%d ",
                           plrPosition(&plReader),
                           plrStartOffset(&plReader), plrEndOffset(&plReader));
        }else if( DL_DEFAULT==DL_POSITIONS ){
          sqlite3_snprintf(sizeof(buf), buf, "%d ", plrPosition(&plReader));
        }else{
          assert( NULL=="Unhandled DL_DEFAULT value" );
        }
        dataBufferAppend(&dump, buf, strlen(buf));
      }
      plrDestroy(&plReader);

      assert( dump.nData>0 );
      dump.nData--;  /* Overwrite trailing space. */
      assert( dump.pData[dump.nData]==' ' );
      dataBufferAppend(&dump, "]] ", 3);
    }
  }
  dlrDestroy(&dlReader);

  assert( dump.nData>0 );
  dump.nData--;  /* Overwrite trailing space. */
  assert( dump.pData[dump.nData]==' ' );
  dump.pData[dump.nData] = '\0';
  assert( dump.nData>0 );

  /* Passes ownership of dump's buffer to pContext. */
  sqlite3_result_text(pContext, dump.pData, dump.nData, sqlite3_free);
  dump.pData = NULL;
  dump.nData = dump.nCapacity = 0;
}
|
/* Implements dump_doclist() for use in inspecting the fts2 index from
** tests.  TEXT result containing a string representation of the
** doclist for the indicated term.  dump_doclist(t, term, level, idx)
** dumps the doclist for term from the segment specified by level, idx
** (in %_segdir), while dump_doclist(t, term) dumps the logical
** doclist for the term across all segments.  The per-segment doclist
** can contain deletions, while the full-index doclist will not
** (deletions are omitted).
**
** Result formats differ with the setting of DL_DEFAULT.  Examples:
**
** DL_DOCIDS: [1] [3] [7]
** DL_POSITIONS: [1 0[0 4] 1[17]] [3 1[5]]
** DL_POSITIONS_OFFSETS: [1 0[0,0,3 4,23,26] 1[17,102,105]] [3 1[5,20,23]]
**
** In each case the number after the outer '[' is the docid.  In the
** latter two cases, the number before the inner '[' is the column
** associated with the values within.  For DL_POSITIONS the numbers
** within are the positions, for DL_POSITIONS_OFFSETS they are the
** position, the start offset, and the end offset.
*/
static void dumpDoclistFunc(
  sqlite3_context *pContext,
  int argc, sqlite3_value **argv
){
  fulltext_cursor *pCursor;
  if( argc!=2 && argc!=4 ){
    generateError(pContext, "dump_doclist", "incorrect arguments");
  }else if( sqlite3_value_type(argv[0])!=SQLITE_BLOB ||
            sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){
    generateError(pContext, "dump_doclist", "illegal first argument");
  }else if( sqlite3_value_text(argv[1])==NULL ||
            sqlite3_value_text(argv[1])[0]=='\0' ){
    generateError(pContext, "dump_doclist", "empty second argument");
  }else{
    const char *pTerm = (const char *)sqlite3_value_text(argv[1]);
    const int nTerm = strlen(pTerm);
    fulltext_vtab *v;
    int rc;
    DataBuffer doclist;

    memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor));
    v = cursor_vtab(pCursor);

    dataBufferInit(&doclist, 0);

    /* termSelect() yields the same logical doclist that queries are
    ** run against.
    */
    if( argc==2 ){
      rc = termSelect(v, v->nColumn, pTerm, nTerm, 0, DL_DEFAULT, &doclist);
    }else{
      sqlite3_stmt *s = NULL;

      /* Get our specific segment's information. */
      rc = sql_get_statement(v, SEGDIR_SELECT_SEGMENT_STMT, &s);
      if( rc==SQLITE_OK ){
        rc = sqlite3_bind_int(s, 1, sqlite3_value_int(argv[2]));
        if( rc==SQLITE_OK ){
          rc = sqlite3_bind_int(s, 2, sqlite3_value_int(argv[3]));
        }
      }

      if( rc==SQLITE_OK ){
        rc = sqlite3_step(s);

        if( rc==SQLITE_DONE ){
          dataBufferDestroy(&doclist);
          generateError(pContext, "dump_doclist", "segment not found");
          return;
        }

        /* Found a segment, load it into doclist. */
        if( rc==SQLITE_ROW ){
          const sqlite_int64 iLeavesEnd = sqlite3_column_int64(s, 1);
          const char *pData = sqlite3_column_blob(s, 2);
          const int nData = sqlite3_column_bytes(s, 2);

          /* loadSegment() is used by termSelect() to load each
          ** segment's data.
          */
          rc = loadSegment(v, pData, nData, iLeavesEnd, pTerm, nTerm, 0,
                           &doclist);
          if( rc==SQLITE_OK ){
            rc = sqlite3_step(s);

            /* Should not have more than one matching segment. */
            if( rc!=SQLITE_DONE ){
              sqlite3_reset(s);
              dataBufferDestroy(&doclist);
              generateError(pContext, "dump_doclist", "invalid segdir");
              return;
            }
            rc = SQLITE_OK;
          }
        }
      }

      sqlite3_reset(s);
    }

    if( rc==SQLITE_OK ){
      if( doclist.nData>0 ){
        createDoclistResult(pContext, doclist.pData, doclist.nData);
      }else{
        /* TODO(shess): This can happen if the term is not present, or
        ** if all instances of the term have been deleted and this is
        ** an all-index dump.  It may be interesting to distinguish
        ** these cases.
        */
        sqlite3_result_text(pContext, "", 0, SQLITE_STATIC);
      }
    }else if( rc==SQLITE_NOMEM ){
      /* Handle out-of-memory cases specially because if they are
      ** generated in fts2 code they may not be reflected in the db
      ** handle.
      */
      /* TODO(shess): Handle this more comprehensively.
      ** sqlite3ErrStr() has what I need, but is internal.
      */
      generateError(pContext, "dump_doclist", "out of memory");
    }else{
      generateError(pContext, "dump_doclist", NULL);
    }

    dataBufferDestroy(&doclist);
  }
}
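
/* Example usage in tests (illustrative; the table and term are
** hypothetical):
**
**   SELECT dump_doclist(t, 'sqlite') FROM t LIMIT 1;        -- whole index
**   SELECT dump_doclist(t, 'sqlite', 0, 0) FROM t LIMIT 1;  -- one segment
*/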
|
#endif

/*
** This routine implements the xFindFunction method for the FTS2
** virtual table.
*/
static int fulltextFindFunction(
  sqlite3_vtab *pVtab,
  int nArg,
  const char *zName,
  void (**pxFunc)(sqlite3_context*,int,sqlite3_value**),
  void **ppArg
){
  if( strcmp(zName,"snippet")==0 ){
    *pxFunc = snippetFunc;
    return 1;
  }else if( strcmp(zName,"offsets")==0 ){
    *pxFunc = snippetOffsetsFunc;
    return 1;
  }else if( strcmp(zName,"optimize")==0 ){
    *pxFunc = optimizeFunc;
    return 1;
#ifdef SQLITE_TEST
  /* NOTE(shess): These functions are present only for testing
  ** purposes.  No particular effort is made to optimize their
  ** execution or how they build their results.
  */
  }else if( strcmp(zName,"dump_terms")==0 ){
    /* fprintf(stderr, "Found dump_terms\n"); */
    *pxFunc = dumpTermsFunc;
    return 1;
  }else if( strcmp(zName,"dump_doclist")==0 ){
    /* fprintf(stderr, "Found dump_doclist\n"); */
    *pxFunc = dumpDoclistFunc;
    return 1;
#endif
  }
  return 0;
}
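
/* Note: these overloads only take effect when the function's first
** argument is a column of the fts2 table itself (e.g. "snippet(t)"
** for a table named t); SQLite then routes the call through
** xFindFunction to the implementations above rather than to the
** generic overloads registered in sqlite3Fts2Init().
*/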
|
/*
** Rename an fts2 table.
*/
static int fulltextRename(
  sqlite3_vtab *pVtab,
  const char *zName
){
  fulltext_vtab *p = (fulltext_vtab *)pVtab;
  int rc = SQLITE_NOMEM;
  char *zSql = sqlite3_mprintf(
    "ALTER TABLE %Q.'%q_content' RENAME TO '%q_content';"
    "ALTER TABLE %Q.'%q_segments' RENAME TO '%q_segments';"
    "ALTER TABLE %Q.'%q_segdir' RENAME TO '%q_segdir';"
    , p->zDb, p->zName, zName
    , p->zDb, p->zName, zName
    , p->zDb, p->zName, zName
  );
  if( zSql ){
    rc = sqlite3_exec(p->db, zSql, 0, 0, 0);
    sqlite3_free(zSql);
  }
  return rc;
}
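
/* For example, "ALTER TABLE main.ft RENAME TO ft2" on an fts2 table
** named ft invokes this method, which renames the ft_content,
** ft_segments, and ft_segdir shadow tables to ft2_content,
** ft2_segments, and ft2_segdir; SQLite itself renames the virtual
** table entry.
*/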
|
static const sqlite3_module fts2Module = {
  /* iVersion      */ 0,
  /* xCreate       */ fulltextCreate,
  /* xConnect      */ fulltextConnect,
  /* xBestIndex    */ fulltextBestIndex,
  /* xDisconnect   */ fulltextDisconnect,
  /* xDestroy      */ fulltextDestroy,
  /* xOpen         */ fulltextOpen,
  /* xClose        */ fulltextClose,
  /* xFilter       */ fulltextFilter,
  /* xNext         */ fulltextNext,
  /* xEof          */ fulltextEof,
  /* xColumn       */ fulltextColumn,
  /* xRowid        */ fulltextRowid,
  /* xUpdate       */ fulltextUpdate,
  /* xBegin        */ fulltextBegin,
  /* xSync         */ fulltextSync,
  /* xCommit       */ fulltextCommit,
  /* xRollback     */ fulltextRollback,
  /* xFindFunction */ fulltextFindFunction,
  /* xRename       */ fulltextRename,
};

static void hashDestroy(void *p){
  fts2Hash *pHash = (fts2Hash *)p;
  sqlite3Fts2HashClear(pHash);
  sqlite3_free(pHash);
}

/*
** The fts2 built-in tokenizers - "simple" and "porter" - are implemented
** in files fts2_tokenizer1.c and fts2_porter.c respectively.  The
** following forward declarations are for functions declared in these
** files used to retrieve the respective implementations.
**
** Calling sqlite3Fts2SimpleTokenizerModule() sets the value pointed
** to by the argument to point at the "simple" tokenizer implementation.
** Function ...PorterTokenizerModule() sets *pModule to point to the
** porter tokenizer/stemmer implementation.
*/
void sqlite3Fts2SimpleTokenizerModule(sqlite3_tokenizer_module const**ppModule);
void sqlite3Fts2PorterTokenizerModule(sqlite3_tokenizer_module const**ppModule);
void sqlite3Fts2IcuTokenizerModule(sqlite3_tokenizer_module const**ppModule);

int sqlite3Fts2InitHashTable(sqlite3 *, fts2Hash *, const char *);

/*
** Initialise the fts2 extension.  If this extension is built as part
** of the sqlite library, then this function is called directly by
** SQLite.  If fts2 is built as a dynamically loadable extension, this
** function is called by the sqlite3_extension_init() entry point.
*/
int sqlite3Fts2Init(sqlite3 *db){
  int rc = SQLITE_OK;
  fts2Hash *pHash = 0;
  const sqlite3_tokenizer_module *pSimple = 0;
  const sqlite3_tokenizer_module *pPorter = 0;
  const sqlite3_tokenizer_module *pIcu = 0;

  sqlite3Fts2SimpleTokenizerModule(&pSimple);
  sqlite3Fts2PorterTokenizerModule(&pPorter);
#ifdef SQLITE_ENABLE_ICU
  sqlite3Fts2IcuTokenizerModule(&pIcu);
#endif

  /* Allocate and initialise the hash-table used to store tokenizers. */
  pHash = sqlite3_malloc(sizeof(fts2Hash));
  if( !pHash ){
    rc = SQLITE_NOMEM;
  }else{
    sqlite3Fts2HashInit(pHash, FTS2_HASH_STRING, 1);
  }

  /* Load the built-in tokenizers into the hash table */
  if( rc==SQLITE_OK ){
    if( sqlite3Fts2HashInsert(pHash, "simple", 7, (void *)pSimple)
     || sqlite3Fts2HashInsert(pHash, "porter", 7, (void *)pPorter)
     || (pIcu && sqlite3Fts2HashInsert(pHash, "icu", 4, (void *)pIcu))
    ){
      rc = SQLITE_NOMEM;
    }
  }

  /* Create the virtual table wrapper around the hash-table and overload
  ** the scalar functions.  If this is successful, register the
  ** module with sqlite.
  */
  if( SQLITE_OK==rc
   && SQLITE_OK==(rc = sqlite3Fts2InitHashTable(db, pHash, "fts2_tokenizer"))
   && SQLITE_OK==(rc = sqlite3_overload_function(db, "snippet", -1))
   && SQLITE_OK==(rc = sqlite3_overload_function(db, "offsets", -1))
   && SQLITE_OK==(rc = sqlite3_overload_function(db, "optimize", -1))
#ifdef SQLITE_TEST
   && SQLITE_OK==(rc = sqlite3_overload_function(db, "dump_terms", -1))
   && SQLITE_OK==(rc = sqlite3_overload_function(db, "dump_doclist", -1))
#endif
  ){
    return sqlite3_create_module_v2(
        db, "fts2", &fts2Module, (void *)pHash, hashDestroy
    );
  }

  /* An error has occurred.  Delete the hash table and return the error
  ** code. */
  assert( rc!=SQLITE_OK );
  if( pHash ){
    sqlite3Fts2HashClear(pHash);
    sqlite3_free(pHash);
  }
  return rc;
}

#if !SQLITE_CORE
int sqlite3_extension_init(
  sqlite3 *db,
  char **pzErrMsg,
  const sqlite3_api_routines *pApi
){
  SQLITE_EXTENSION_INIT2(pApi)
  return sqlite3Fts2Init(db);
}
#endif
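
/* Example: when built as a loadable extension, the module can be
** loaded from SQL before creating tables (the file name is
** hypothetical, and loading must first be enabled with
** sqlite3_enable_load_extension()):
**
**   SELECT load_extension('libfts2');
**   CREATE VIRTUAL TABLE pages USING fts2(title, body);
**   SELECT title FROM pages WHERE pages MATCH 'sqlite';
*/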
|
#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2) */