1 package org.apache.lucene.index;
4 * Copyright 2004 The Apache Software Foundation
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
10 * http://www.apache.org/licenses/LICENSE-2.0
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
19 import java.io.IOException;
20 import java.io.PrintStream;
21 import java.util.ArrayList;
22 import java.util.Collections;
23 import java.util.List;
24 import java.util.Random;
25 import java.util.concurrent.atomic.AtomicBoolean;
27 import org.apache.lucene.analysis.MockAnalyzer;
28 import org.apache.lucene.analysis.WhitespaceAnalyzer;
29 import org.apache.lucene.document.Document;
30 import org.apache.lucene.document.Field;
31 import org.apache.lucene.document.Field.Index;
32 import org.apache.lucene.document.Field.Store;
33 import org.apache.lucene.search.TermQuery;
34 import org.apache.lucene.search.IndexSearcher;
35 import org.apache.lucene.search.Query;
36 import org.apache.lucene.search.TopDocs;
37 import org.apache.lucene.store.Directory;
38 import org.apache.lucene.store.MockDirectoryWrapper;
39 import org.apache.lucene.store.AlreadyClosedException;
40 import org.apache.lucene.store.RAMDirectory;
41 import org.apache.lucene.util.LuceneTestCase;
42 import org.apache.lucene.util._TestUtil;
43 import org.apache.lucene.util.ThreadInterruptedException;
44 import java.util.concurrent.atomic.AtomicInteger;
46 public class TestIndexWriterReader extends LuceneTestCase {
47 static PrintStream infoStream = VERBOSE ? System.out : null;
// Counts the documents in reader r that contain term t, using a raw TermDocs
// enumeration. NOTE(review): the body is truncated in this view (no iteration
// or return statement visible); presumably it advances td and tallies hits --
// confirm against the complete file.
49 public static int count(Term t, IndexReader r) throws IOException {
51 TermDocs td = r.termDocs(t);
// Exercises getReader()/isCurrent() across a randomized sequence of
// add/update/delete operations, then verifies currency semantics around
// forceMerge, commit, and reopening the writer on the same directory.
// NOTE(review): lines are elided in this view (closing braces, switch case
// labels, and some close() calls are not visible).
60 public void testAddCloseOpen() throws IOException {
61 Directory dir1 = newDirectory();
62 IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
64 IndexWriter writer = new IndexWriter(dir1, iwc);
65 for (int i = 0; i < 97 ; i++) {
// Open an NRT reader, then make a change behind its back.
66 IndexReader reader = writer.getReader();
68 writer.addDocument(DocHelper.createDocument(i, "x", 1 + random.nextInt(5)));
70 int previous = random.nextInt(i);
71 // a check if the reader is current here could fail since there might be
// Randomly pick one of several mutations (case labels elided from view).
73 switch (random.nextInt(5)) {
77 writer.addDocument(DocHelper.createDocument(i, "x", 1 + random.nextInt(5)));
80 writer.updateDocument(new Term("id", "" + previous), DocHelper.createDocument(
81 previous, "x", 1 + random.nextInt(5)));
84 writer.deleteDocuments(new Term("id", "" + previous));
// Any mutation after getReader() must make the reader non-current.
87 assertFalse(reader.isCurrent());
90 writer.forceMerge(1); // make sure all merging is done etc.
91 IndexReader reader = writer.getReader();
92 writer.commit(); // no changes that are not visible to the reader
93 assertTrue(reader.isCurrent());
95 assertTrue(reader.isCurrent()); // all changes are visible to the reader
// Reopen a fresh writer on the same dir: the old NRT reader stays current
// until that writer actually flushes/commits new segments.
96 iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
97 writer = new IndexWriter(dir1, iwc);
98 assertTrue(reader.isCurrent());
99 writer.addDocument(DocHelper.createDocument(1, "x", 1+random.nextInt(5)));
100 assertTrue(reader.isCurrent()); // segments in ram but IW is different to the readers one
102 assertFalse(reader.isCurrent()); // segments written
// Verifies that updateDocument() is visible to a freshly-opened NRT reader
// (and invalidates older readers), and that the update survives a commit as
// seen by a regular directory reader. NOTE(review): lines elided in this view
// (some close()/commit() calls and closing braces are not visible).
107 public void testUpdateDocument() throws Exception {
108 boolean doFullMerge = true;
110 Directory dir1 = newDirectory();
111 IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
112 if (iwc.getMaxBufferedDocs() < 20) {
113 iwc.setMaxBufferedDocs(20);
// Randomly toggle compound-file format, with merging disabled either way.
116 if (random.nextBoolean()) {
117 iwc.setMergePolicy(NoMergePolicy.NO_COMPOUND_FILES);
119 iwc.setMergePolicy(NoMergePolicy.COMPOUND_FILES);
121 IndexWriter writer = new IndexWriter(dir1, iwc);
124 createIndexNoClose(!doFullMerge, "index1", writer);
126 // writer.flush(false, true, true);
129 IndexReader r1 = writer.getReader();
130 assertTrue(r1.isCurrent());
// Rewrite doc 10 under a new id; r1 must become stale, the next NRT reader
// must see the change.
132 String id10 = r1.document(10).getField("id").stringValue();
134 Document newDoc = r1.document(10);
135 newDoc.removeField("id");
136 newDoc.add(newField("id", Integer.toString(8000), Store.YES, Index.NOT_ANALYZED));
137 writer.updateDocument(new Term("id", id10), newDoc);
138 assertFalse(r1.isCurrent());
140 IndexReader r2 = writer.getReader();
141 assertTrue(r2.isCurrent());
142 assertEquals(0, count(new Term("id", id10), r2));
143 assertEquals(1, count(new Term("id", Integer.toString(8000)), r2));
147 assertTrue(r2.isCurrent());
// A non-NRT directory reader sees the same post-commit state.
149 IndexReader r3 = IndexReader.open(dir1, true);
150 assertTrue(r3.isCurrent());
151 assertTrue(r2.isCurrent());
152 assertEquals(0, count(new Term("id", id10), r3));
153 assertEquals(1, count(new Term("id", Integer.toString(8000)), r3));
// A new writer buffering a doc in RAM does not yet invalidate r2/r3.
155 writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
156 Document doc = new Document();
157 doc.add(newField("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
158 writer.addDocument(doc);
159 assertTrue(r2.isCurrent());
160 assertTrue(r3.isCurrent());
// ...until the change is persisted (elided lines presumably close/commit
// the writer here -- confirm against the complete file).
164 assertFalse(r2.isCurrent());
165 assertTrue(!r3.isCurrent());
// Focused isCurrent() semantics: an NRT reader goes stale on any change
// (even an uncommitted one), while a directory reader only goes stale once
// the writer commits/closes. NOTE(review): lines elided in this view.
173 public void testIsCurrent() throws IOException {
174 Directory dir = newDirectory();
175 IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
177 IndexWriter writer = new IndexWriter(dir, iwc);
178 Document doc = new Document();
179 doc.add(newField("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
180 writer.addDocument(doc);
// Second writer session on the same directory.
183 iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
184 writer = new IndexWriter(dir, iwc);
185 doc = new Document();
186 doc.add(newField("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
187 IndexReader nrtReader = writer.getReader();
188 assertTrue(nrtReader.isCurrent());
189 writer.addDocument(doc);
190 assertFalse(nrtReader.isCurrent()); // should see the changes
191 writer.forceMerge(1); // make sure we don't have a merge going on
192 assertFalse(nrtReader.isCurrent());
// Directory reader vs NRT reader: the NRT reader sees the uncommitted doc
// (maxDoc 2 vs 1) yet both count as "current" pre-commit.
195 IndexReader dirReader = IndexReader.open(dir);
196 nrtReader = writer.getReader();
198 assertTrue(dirReader.isCurrent());
199 assertTrue(nrtReader.isCurrent()); // nothing was committed yet so we are still current
200 assertEquals(2, nrtReader.maxDoc()); // sees the actual document added
201 assertEquals(1, dirReader.maxDoc());
202 writer.close(); // close is actually a commit both should see the changes
203 assertTrue(nrtReader.isCurrent());
204 assertFalse(dirReader.isCurrent()); // this reader has been opened before the writer was closed / committed
// NOTE(review): lines elided in this view (javadoc delimiters, closing
// braces, and some close() calls are not visible).
212 * Test using IW.addIndexes
// Builds two separate 100-doc indexes, addIndexes()-merges dir2 into the
// writer on dir1, and checks an NRT reader sees all 200 docs with the
// expected per-index "indexname" field values.
216 public void testAddIndexes() throws Exception {
217 boolean doFullMerge = false;
219 Directory dir1 = newDirectory();
220 IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
221 if (iwc.getMaxBufferedDocs() < 20) {
222 iwc.setMaxBufferedDocs(20);
225 if (random.nextBoolean()) {
226 iwc.setMergePolicy(NoMergePolicy.NO_COMPOUND_FILES);
228 iwc.setMergePolicy(NoMergePolicy.COMPOUND_FILES);
230 IndexWriter writer = new IndexWriter(dir1, iwc);
232 writer.setInfoStream(infoStream);
234 createIndexNoClose(!doFullMerge, "index1", writer);
235 writer.flush(false, true);
237 // create a 2nd index
238 Directory dir2 = newDirectory();
239 IndexWriter writer2 = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
240 writer2.setInfoStream(infoStream);
241 createIndexNoClose(!doFullMerge, "index2", writer2);
// addIndexes is a change like any other: r0 must go stale.
244 IndexReader r0 = writer.getReader();
245 assertTrue(r0.isCurrent());
246 writer.addIndexes(new Directory[] { dir2 });
247 assertFalse(r0.isCurrent());
250 IndexReader r1 = writer.getReader();
251 assertTrue(r1.isCurrent());
254 assertTrue(r1.isCurrent()); // we have seen all changes - no change after opening the NRT reader
256 assertEquals(200, r1.maxDoc());
258 int index2df = r1.docFreq(new Term("indexname", "index2"));
260 assertEquals(100, index2df);
262 // verify the docs are from different indexes
263 Document doc5 = r1.document(5);
264 assertEquals("index1", doc5.get("indexname"));
265 Document doc150 = r1.document(150);
266 assertEquals("index2", doc150.get("indexname"));
// Adds the same 100-doc source directory five times via addIndexes() and
// checks the NRT reader sees 500 docs (addIndexes does not deduplicate).
// NOTE(review): closing braces and close() calls elided from this view.
273 public void testAddIndexes2() throws Exception {
274 boolean doFullMerge = false;
276 Directory dir1 = newDirectory();
277 IndexWriter writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
278 writer.setInfoStream(infoStream);
280 // create a 2nd index
281 Directory dir2 = newDirectory();
282 IndexWriter writer2 = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
283 writer2.setInfoStream(infoStream);
284 createIndexNoClose(!doFullMerge, "index2", writer2);
287 writer.addIndexes(new Directory[] { dir2 });
288 writer.addIndexes(new Directory[] { dir2 });
289 writer.addIndexes(new Directory[] { dir2 });
290 writer.addIndexes(new Directory[] { dir2 });
291 writer.addIndexes(new Directory[] { dir2 });
293 IndexReader r1 = writer.getReader();
294 assertEquals(500, r1.maxDoc());
// NOTE(review): javadoc delimiters and several lines elided from this view.
303 * Deletes using IW.deleteDocuments
// Verifies that deletes issued on the writer (by Term and by Query) are
// visible only in NRT readers opened after the delete, while older readers
// keep their point-in-time view; finally checks the delete was persisted.
307 public void testDeleteFromIndexWriter() throws Exception {
308 boolean doFullMerge = true;
310 Directory dir1 = newDirectory();
311 IndexWriter writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setReaderTermsIndexDivisor(2));
312 writer.setInfoStream(infoStream);
314 createIndexNoClose(!doFullMerge, "index1", writer);
315 writer.flush(false, true);
317 IndexReader r1 = writer.getReader();
319 String id10 = r1.document(10).getField("id").stringValue();
321 // deleted IW docs should not show up in the next getReader
322 writer.deleteDocuments(new Term("id", id10));
323 IndexReader r2 = writer.getReader();
// r1 is a snapshot from before the delete; r2 reflects it.
324 assertEquals(1, count(new Term("id", id10), r1));
325 assertEquals(0, count(new Term("id", id10), r2));
327 String id50 = r1.document(50).getField("id").stringValue();
328 assertEquals(1, count(new Term("id", id50), r1));
330 writer.deleteDocuments(new Term("id", id50));
332 IndexReader r3 = writer.getReader();
333 assertEquals(0, count(new Term("id", id10), r3));
334 assertEquals(0, count(new Term("id", id50), r3));
// Delete-by-query takes a TermQuery, not a Term.
336 String id75 = r1.document(75).getField("id").stringValue();
337 writer.deleteDocuments(new TermQuery(new Term("id", id75)));
338 IndexReader r4 = writer.getReader();
339 assertEquals(1, count(new Term("id", id75), r3));
340 assertEquals(0, count(new Term("id", id75), r4));
348 // reopen the writer to verify the delete made it to the directory
349 writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
350 writer.setInfoStream(infoStream);
351 IndexReader w2r1 = writer.getReader();
352 assertEquals(0, count(new Term("id", id10), w2r1));
// Stress test: multiple threads concurrently call addIndexes() on one main
// writer (via AddDirectoriesThreads below), then the doc count is checked
// both through the writer and through a fresh directory reader.
// NOTE(review): numDirs initialization and closing braces elided from view.
358 public void testAddIndexesAndDoDeletesThreads() throws Throwable {
359 final int numIter = 2;
362 Directory mainDir = newDirectory();
363 IndexWriter mainWriter = new IndexWriter(mainDir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
364 _TestUtil.reduceOpenFiles(mainWriter);
366 mainWriter.setInfoStream(infoStream);
367 AddDirectoriesThreads addDirThreads = new AddDirectoriesThreads(numIter, mainWriter);
368 addDirThreads.launchThreads(numDirs);
369 addDirThreads.joinThreads();
371 //assertEquals(100 + numDirs * (3 * numIter / 4) * addDirThreads.NUM_THREADS
372 // * addDirThreads.NUM_INIT_DOCS, addDirThreads.mainWriter.numDocs());
// The helper tracks its own expected count atomically; compare against it.
373 assertEquals(addDirThreads.count.intValue(), addDirThreads.mainWriter.numDocs());
375 addDirThreads.close(true);
377 assertTrue(addDirThreads.failures.size() == 0);
379 _TestUtil.checkIndex(mainDir);
381 IndexReader reader = IndexReader.open(mainDir, true);
382 assertEquals(addDirThreads.count.intValue(), reader.numDocs());
383 //assertEquals(100 + numDirs * (3 * numIter / 4) * addDirThreads.NUM_THREADS
384 // * addDirThreads.NUM_INIT_DOCS, reader.numDocs());
387 addDirThreads.closeDir();
// Test harness: NUM_THREADS worker threads each repeatedly addIndexes() a
// snapshot of a pre-built 100-doc source directory into the shared
// mainWriter. Thread-safe bookkeeping via AtomicInteger counters and a
// synchronized failures list. NOTE(review): heavily elided in this view --
// field declarations for numDirs/addDir, run() scaffolding, case labels in
// doBody, and closing braces are not visible.
391 private class AddDirectoriesThreads {
393 final static int NUM_THREADS = 5;
394 final static int NUM_INIT_DOCS = 100;
396 final Thread[] threads = new Thread[NUM_THREADS];
397 IndexWriter mainWriter;
398 final List<Throwable> failures = new ArrayList<Throwable>();
399 IndexReader[] readers;
400 boolean didClose = false;
// Expected total docs added, and number of addIndexes calls performed.
401 AtomicInteger count = new AtomicInteger(0);
402 AtomicInteger numaddIndexes = new AtomicInteger(0);
404 public AddDirectoriesThreads(int numDirs, IndexWriter mainWriter) throws Throwable {
405 this.numDirs = numDirs;
406 this.mainWriter = mainWriter;
// Build the shared 100-doc source index once; workers copy from it.
407 addDir = newDirectory();
408 IndexWriter writer = new IndexWriter(addDir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
409 for (int i = 0; i < NUM_INIT_DOCS; i++) {
410 Document doc = DocHelper.createDocument(i, "addindex", 4);
411 writer.addDocument(doc);
416 readers = new IndexReader[numDirs];
417 for (int i = 0; i < numDirs; i++)
418 readers[i] = IndexReader.open(addDir, false);
// joinThreads (signature elided): waits for all workers, translating
// InterruptedException per Lucene convention.
422 for (int i = 0; i < NUM_THREADS; i++)
425 } catch (InterruptedException ie) {
426 throw new ThreadInterruptedException(ie);
430 void close(boolean doWait) throws Throwable {
433 mainWriter.waitForMerges();
435 mainWriter.close(doWait);
438 void closeDir() throws Throwable {
439 for (int i = 0; i < numDirs; i++)
// Record a worker failure for the main test thread to assert on.
444 void handle(Throwable t) {
445 t.printStackTrace(System.out);
446 synchronized (failures) {
451 void launchThreads(final int numIter) {
452 for (int i = 0; i < NUM_THREADS; i++) {
453 threads[i] = new Thread() {
// Each worker snapshots addDir into fresh RAM-backed directories so
// concurrent addIndexes calls never share a source Directory.
457 final Directory[] dirs = new Directory[numDirs];
458 for (int k = 0; k < numDirs; k++)
459 dirs[k] = new MockDirectoryWrapper(random, new RAMDirectory(addDir));
462 // System.out.println(Thread.currentThread().getName() + ": iter
464 for (int x=0; x < numIter; x++) {
465 // only do addIndexes
468 //if (numIter > 0 && j == numIter)
473 } catch (Throwable t) {
479 for (int i = 0; i < NUM_THREADS; i++)
// doBody: one addIndexes variant per j (case labels elided); all paths
// feed the same source docs, so the count bump is uniform.
483 void doBody(int j, Directory[] dirs) throws Throwable {
486 mainWriter.addIndexes(dirs);
487 mainWriter.forceMerge(1);
490 mainWriter.addIndexes(dirs);
491 numaddIndexes.incrementAndGet();
494 mainWriter.addIndexes(readers);
499 count.addAndGet(dirs.length*NUM_INIT_DOCS);
// Variant with full merge enabled; see doTestIndexWriterReopenSegment.
503 public void testIndexWriterReopenSegmentFullMerge() throws Exception {
504 doTestIndexWriterReopenSegment(true);
// Variant without full merge; see doTestIndexWriterReopenSegment.
507 public void testIndexWriterReopenSegment() throws Exception {
508 doTestIndexWriterReopenSegment(false);
// NOTE(review): javadoc delimiters and several lines elided from this view.
512 * Tests creating a segment, then check to insure the segment can be seen via
// Shared body: create segments through a writer, flush, and verify each
// getReader() call reflects the current doc count; finally reopen the
// writer to confirm the changes reached the directory.
515 public void doTestIndexWriterReopenSegment(boolean doFullMerge) throws Exception {
516 Directory dir1 = newDirectory();
517 IndexWriter writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
518 writer.setInfoStream(infoStream);
519 IndexReader r1 = writer.getReader();
520 assertEquals(0, r1.maxDoc());
521 createIndexNoClose(false, "index1", writer);
522 writer.flush(!doFullMerge, true);
524 IndexReader iwr1 = writer.getReader();
525 assertEquals(100, iwr1.maxDoc());
527 IndexReader r2 = writer.getReader();
528 assertEquals(r2.maxDoc(), 100);
// Add a second batch of 100 docs with distinct ids.
530 for (int x = 10000; x < 10000 + 100; x++) {
531 Document d = DocHelper.createDocument(x, "index1", 5);
532 writer.addDocument(d);
534 writer.flush(false, true);
535 // verify the reader was reopened internally
536 IndexReader iwr2 = writer.getReader();
537 assertTrue(iwr2 != r1);
538 assertEquals(200, iwr2.maxDoc());
539 // should have flushed out a segment
540 IndexReader r3 = writer.getReader();
541 assertTrue(r2 != r3);
542 assertEquals(200, r3.maxDoc());
544 // dec ref the readers rather than close them because
545 // closing flushes changes to the writer
553 // test whether the changes made it to the directory
554 writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
555 IndexReader w2r1 = writer.getReader();
556 // insure the deletes were actually flushed to the directory
557 assertEquals(200, w2r1.maxDoc());
565 * Delete a document by term and return the doc id
567 * public static int deleteDocument(Term term, IndexWriter writer) throws
568 * IOException { IndexReader reader = writer.getReader(); TermDocs td =
569 * reader.termDocs(term); int doc = -1; //if (td.next()) { // doc = td.doc();
570 * //} //writer.deleteDocuments(term); td.close(); return doc; }
// Builds a 100-doc index named indexName in dir1 and (per the elided tail,
// presumably) commits/closes the writer; when multiSegment is true it
// forces a segment boundary every 10 docs. NOTE(review): the commit/close
// and closing braces are elided from this view -- confirm.
573 public static void createIndex(Random random, Directory dir1, String indexName,
574 boolean multiSegment) throws IOException {
575 IndexWriter w = new IndexWriter(dir1, LuceneTestCase.newIndexWriterConfig(random,
576 TEST_VERSION_CURRENT, new MockAnalyzer(random))
577 .setMergePolicy(new LogDocMergePolicy()));
578 for (int i = 0; i < 100; i++) {
579 w.addDocument(DocHelper.createDocument(i, indexName, 4));
580 if (multiSegment && (i % 10) == 0) {
// Adds 100 docs named indexName to an already-open writer WITHOUT closing
// it -- callers manage the writer's lifecycle. NOTE(review): any
// multiSegment-specific handling is elided from this view.
589 public static void createIndexNoClose(boolean multiSegment, String indexName,
590 IndexWriter w) throws IOException {
591 for (int i = 0; i < 100; i++) {
592 w.addDocument(DocHelper.createDocument(i, indexName, 4));
// Merged-segment warmer stub used by testMergeWarmer. NOTE(review): the
// body is elided here; presumably it increments a warmCount field that the
// test reads (testMergeWarmer asserts on warmer.warmCount) -- confirm.
599 private static class MyWarmer extends IndexWriter.IndexReaderWarmer {
602 public void warm(IndexReader reader) throws IOException {
// Verifies the merged-segment warmer fires during concurrent merges in NRT
// mode, and again on forceMerge. NOTE(review): lines elided in this view
// (the Directory argument to the IndexWriter ctor and closing braces are
// not visible).
607 public void testMergeWarmer() throws Exception {
609 Directory dir1 = newDirectory();
611 MyWarmer warmer = new MyWarmer();
612 IndexWriter writer = new IndexWriter(
614 newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
615 setMaxBufferedDocs(2).
616 setMergedSegmentWarmer(warmer).
617 setMergeScheduler(new ConcurrentMergeScheduler()).
618 setMergePolicy(newLogMergePolicy())
620 writer.setInfoStream(infoStream);
623 createIndexNoClose(false, "test", writer);
625 // get a reader to put writer into near real-time mode
626 IndexReader r1 = writer.getReader();
// Aggressive merging (factor 2) so warming actually happens.
628 ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(2);
630 int num = atLeast(100);
631 for (int i = 0; i < num; i++) {
632 writer.addDocument(DocHelper.createDocument(i, "test", 4));
// Wait for background merges so warmCount is stable before asserting.
634 ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
636 assertTrue(warmer.warmCount > 0);
637 final int count = warmer.warmCount;
639 writer.addDocument(DocHelper.createDocument(17, "test", 4));
640 writer.forceMerge(1);
641 assertTrue(warmer.warmCount > count);
// Verifies an NRT reader can be refreshed via openIfChanged after a commit
// and further adds, ending with 110 visible docs. NOTE(review): elided
// lines presumably include a commit, reassignment of r1 from r2, and
// close() calls -- confirm against the complete file.
648 public void testAfterCommit() throws Exception {
649 Directory dir1 = newDirectory();
650 IndexWriter writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergeScheduler(new ConcurrentMergeScheduler()));
652 writer.setInfoStream(infoStream);
655 createIndexNoClose(false, "test", writer);
657 // get a reader to put writer into near real-time mode
658 IndexReader r1 = writer.getReader();
659 _TestUtil.checkIndex(dir1);
661 _TestUtil.checkIndex(dir1);
662 assertEquals(100, r1.numDocs());
664 for (int i = 0; i < 10; i++) {
665 writer.addDocument(DocHelper.createDocument(i, "test", 4));
667 ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
669 IndexReader r2 = IndexReader.openIfChanged(r1);
674 assertEquals(110, r1.numDocs());
680 // Make sure reader remains usable even if IndexWriter closes
// After writer.close() (elided between lines 689 and 692), the NRT reader
// must still answer searches, but openIfChanged against it must throw
// AlreadyClosedException.
681 public void testAfterClose() throws Exception {
682 Directory dir1 = newDirectory();
683 IndexWriter writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
684 writer.setInfoStream(infoStream);
687 createIndexNoClose(false, "test", writer);
689 IndexReader r = writer.getReader();
692 _TestUtil.checkIndex(dir1);
694 // reader should remain usable even after IndexWriter is closed:
695 assertEquals(100, r.numDocs());
696 Query q = new TermQuery(new Term("indexname", "test"));
697 IndexSearcher searcher = newSearcher(r);
698 assertEquals(100, searcher.search(q, 10).totalHits);
// Refreshing off a closed writer's reader is expected to fail.
701 IndexReader.openIfChanged(r);
702 fail("failed to hit AlreadyClosedException");
703 } catch (AlreadyClosedException ace) {
710 // Stress test reopen during addIndexes
// Worker threads hammer addIndexes() for ~SECONDS while the main thread
// repeatedly reopens the NRT reader and searches; hit counts must be
// monotonically non-decreasing and no thread may throw. NOTE(review):
// lines elided (the Directory ctor arg, do{...} loop scaffolding, thread
// start/join, reader swap on reopen, and lastCount updates not visible).
711 public void testDuringAddIndexes() throws Exception {
712 MockDirectoryWrapper dir1 = newDirectory();
713 final IndexWriter writer = new IndexWriter(
715 newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).
716 setMergePolicy(newLogMergePolicy(2))
718 writer.setInfoStream(infoStream);
719 ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(2);
722 createIndexNoClose(false, "test", writer);
// Ten independent RAM snapshots of dir1 serve as addIndexes sources.
725 final Directory[] dirs = new Directory[10];
726 for (int i=0;i<10;i++) {
727 dirs[i] = new MockDirectoryWrapper(random, new RAMDirectory(dir1));
730 IndexReader r = writer.getReader();
732 final int NUM_THREAD = 5;
733 final float SECONDS = 0.5f;
735 final long endTime = (long) (System.currentTimeMillis() + 1000.*SECONDS);
736 final List<Throwable> excs = Collections.synchronizedList(new ArrayList<Throwable>());
738 final Thread[] threads = new Thread[NUM_THREAD];
739 for(int i=0;i<NUM_THREAD;i++) {
740 threads[i] = new Thread() {
745 writer.addIndexes(dirs);
747 } catch (Throwable t) {
749 throw new RuntimeException(t);
751 } while(System.currentTimeMillis() < endTime);
754 threads[i].setDaemon(true);
// Main thread: reopen + search until the workers' deadline passes.
759 while(System.currentTimeMillis() < endTime) {
760 IndexReader r2 = IndexReader.openIfChanged(r);
765 Query q = new TermQuery(new Term("indexname", "test"));
766 IndexSearcher searcher = newSearcher(r);
767 final int count = searcher.search(q, 10).totalHits;
769 assertTrue(count >= lastCount);
// After joining workers, one final reopen + consistency check.
773 for(int i=0;i<NUM_THREAD;i++) {
777 IndexReader r2 = IndexReader.openIfChanged(r);
782 Query q = new TermQuery(new Term("indexname", "test"));
783 IndexSearcher searcher = newSearcher(r);
784 final int count = searcher.search(q, 10).totalHits;
786 assertTrue(count >= lastCount);
788 assertEquals(0, excs.size());
790 assertEquals(0, dir1.getOpenDeletedFiles().size());
797 // Stress test reopen during add/delete
// Same shape as testDuringAddIndexes, but workers interleave adds with
// delete-by-term while the main thread reopens and accumulates hit counts;
// asserts some hits were seen and no worker failed. NOTE(review): lines
// elided (ctor Directory arg, do/while scaffolding, thread start/join,
// reader swap, count variable updates not visible).
798 public void testDuringAddDelete() throws Exception {
799 Directory dir1 = newDirectory();
800 final IndexWriter writer = new IndexWriter(
802 newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
803 setMergePolicy(newLogMergePolicy(2))
805 writer.setInfoStream(infoStream);
806 ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(2);
809 createIndexNoClose(false, "test", writer);
812 IndexReader r = writer.getReader();
814 final int NUM_THREAD = 5;
815 final float SECONDS = 0.5f;
817 final long endTime = (long) (System.currentTimeMillis() + 1000.*SECONDS);
818 final List<Throwable> excs = Collections.synchronizedList(new ArrayList<Throwable>());
820 final Thread[] threads = new Thread[NUM_THREAD];
821 for(int i=0;i<NUM_THREAD;i++) {
822 threads[i] = new Thread() {
// Per-thread Random seeded off the shared one -- keeps worker streams
// deterministic per seed without contending on `random`.
823 final Random r = new Random(random.nextLong());
830 for(int docUpto=0;docUpto<10;docUpto++) {
831 writer.addDocument(DocHelper.createDocument(10*count+docUpto, "test", 4));
834 final int limit = count*10;
835 for(int delUpto=0;delUpto<5;delUpto++) {
836 int x = r.nextInt(limit);
837 writer.deleteDocuments(new Term("field3", "b"+x));
839 } catch (Throwable t) {
841 throw new RuntimeException(t);
843 } while(System.currentTimeMillis() < endTime);
846 threads[i].setDaemon(true);
851 while(System.currentTimeMillis() < endTime) {
852 IndexReader r2 = IndexReader.openIfChanged(r);
857 Query q = new TermQuery(new Term("indexname", "test"));
858 IndexSearcher searcher = newSearcher(r);
859 sum += searcher.search(q, 10).totalHits;
863 for(int i=0;i<NUM_THREAD;i++) {
866 // at least search once
867 IndexReader r2 = IndexReader.openIfChanged(r);
872 Query q = new TermQuery(new Term("indexname", "test"));
873 IndexSearcher searcher = newSearcher(r);
874 sum += searcher.search(q, 10).totalHits;
876 assertTrue("no documents found at all", sum > 0);
878 assertEquals(0, excs.size());
// Verifies forceMergeDeletes() expunges deleted docs: after deleting one of
// (presumably) two docs and merging, a fresh directory reader sees 1 doc
// and no deletions. NOTE(review): the doc-adding loop (ids "0"/"1"),
// commit/close calls, and closing braces are elided from this view.
885 public void testForceMergeDeletes() throws Throwable {
886 Directory dir = newDirectory();
887 final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
888 Document doc = new Document();
889 doc.add(newField("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
890 Field id = newField("id", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
896 w.deleteDocuments(new Term("id", "0"));
// NRT reader open during forceMergeDeletes exercises reader pooling.
898 IndexReader r = w.getReader();
899 w.forceMergeDeletes();
902 r = IndexReader.open(dir, true);
903 assertEquals(1, r.numDocs());
904 assertFalse(r.hasDeletions());
// Verifies numDocs() tracks deletes across successive NRT readers:
// 2 docs -> delete id:0 -> 1 doc -> delete id:1 -> 0 docs. NOTE(review):
// the doc-adding lines and the getReader() calls that refresh r between
// assertions are elided from this view.
909 public void testDeletesNumDocs() throws Throwable {
910 Directory dir = newDirectory();
911 final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
912 Document doc = new Document();
913 doc.add(newField("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
914 Field id = newField("id", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
920 IndexReader r = w.getReader();
921 assertEquals(2, r.numDocs());
924 w.deleteDocuments(new Term("id", "0"));
926 assertEquals(1, r.numDocs());
929 w.deleteDocuments(new Term("id", "1"));
931 assertEquals(0, r.numDocs());
938 public void testEmptyIndex() throws Exception {
939 // Ensures that getReader works on an empty index, which hasn't been committed yet.
940 Directory dir = newDirectory();
941 IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
// getReader() must succeed (and report zero docs) before any commit.
942 IndexReader r = w.getReader();
943 assertEquals(0, r.numDocs());
// Verifies an inline IndexReaderWarmer runs when reader pooling is enabled:
// 20 buffered docs with mergeFactor/policy 10 trigger a merge whose warmed
// reader must see all 20 hits. NOTE(review): lines elided (the Directory
// ctor arg, the didWarm.set(...) call inside warm(), the addDocument call
// in the loop, and close calls are not visible).
949 public void testSegmentWarmer() throws Exception {
950 Directory dir = newDirectory();
951 final AtomicBoolean didWarm = new AtomicBoolean();
952 IndexWriter w = new IndexWriter(
954 newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).
955 setMaxBufferedDocs(2).
956 setReaderPooling(true).
957 setMergedSegmentWarmer(new IndexWriter.IndexReaderWarmer() {
959 public void warm(IndexReader r) throws IOException {
960 IndexSearcher s = newSearcher(r);
961 TopDocs hits = s.search(new TermQuery(new Term("foo", "bar")), 10);
962 assertEquals(20, hits.totalHits);
967 setMergePolicy(newLogMergePolicy(10))
970 Document doc = new Document();
971 doc.add(newField("foo", "bar", Field.Store.YES, Field.Index.NOT_ANALYZED));
972 for(int i=0;i<20;i++) {
// The warmer must have fired at least once by the end of the test.
978 assertTrue(didWarm.get());
// With readerTermsIndexDivisor = -1 the terms index is never loaded, so any
// term seek through the NRT-opened reader must throw IllegalStateException.
// NOTE(review): the addDocument call, try scaffolding, and close calls are
// elided from this view.
981 public void testNoTermsIndex() throws Exception {
982 Directory dir = newDirectory();
983 IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
984 TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT))
985 .setReaderTermsIndexDivisor(-1));
986 Document doc = new Document();
987 doc.add(new Field("f", "val", Store.NO, Index.ANALYZED));
// Open via the writer (NRT) so the divisor setting is inherited.
989 IndexReader r = IndexReader.open(w, true);
991 r.termDocs(new Term("f", "val"));
992 fail("should have failed to seek since terms index was not loaded");
993 } catch (IllegalStateException e) {
994 // expected - we didn't load the term index
1002 public void testReopenAfterNoRealChange() throws Exception {
1003 Directory d = newDirectory();
1004 IndexWriter w = new IndexWriter(
1006 newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
1007 w.setInfoStream(VERBOSE ? System.out : null);
1009 IndexReader r = w.getReader(); // start pooling readers
1011 IndexReader r2 = IndexReader.openIfChanged(r);
1014 w.addDocument(new Document());
1015 IndexReader r3 = IndexReader.openIfChanged(r);
1017 assertTrue(r3.getVersion() != r.getVersion());
1018 assertTrue(r3.isCurrent());
1020 // Deletes nothing in reality...:
1021 w.deleteDocuments(new Term("foo", "bar"));
1023 // ... but IW marks this as not current:
1024 assertFalse(r3.isCurrent());
1025 IndexReader r4 = IndexReader.openIfChanged(r3);
1028 // Deletes nothing in reality...:
1029 w.deleteDocuments(new Term("foo", "bar"));
1030 IndexReader r5 = IndexReader.openIfChanged(r3, w, true);