1 package org.apache.lucene.index;
4 * Copyright 2004 The Apache Software Foundation
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
10 * http://www.apache.org/licenses/LICENSE-2.0
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
19 import java.io.IOException;
20 import java.io.PrintStream;
21 import java.util.ArrayList;
22 import java.util.Collections;
23 import java.util.List;
24 import java.util.Random;
25 import java.util.concurrent.atomic.AtomicBoolean;
27 import org.apache.lucene.analysis.MockAnalyzer;
28 import org.apache.lucene.analysis.WhitespaceAnalyzer;
29 import org.apache.lucene.document.Document;
30 import org.apache.lucene.document.Field;
31 import org.apache.lucene.document.Field.Index;
32 import org.apache.lucene.document.Field.Store;
33 import org.apache.lucene.document.Field.TermVector;
34 import org.apache.lucene.search.TermQuery;
35 import org.apache.lucene.search.IndexSearcher;
36 import org.apache.lucene.search.Query;
37 import org.apache.lucene.search.TopDocs;
38 import org.apache.lucene.store.Directory;
39 import org.apache.lucene.store.MockDirectoryWrapper;
40 import org.apache.lucene.store.AlreadyClosedException;
41 import org.apache.lucene.store.RAMDirectory;
42 import org.apache.lucene.util.LuceneTestCase;
43 import org.apache.lucene.util._TestUtil;
44 import org.apache.lucene.util.ThreadInterruptedException;
45 import java.util.concurrent.atomic.AtomicInteger;
47 public class TestIndexWriterReader extends LuceneTestCase {
48 static PrintStream infoStream = VERBOSE ? System.out : null;
// Counts how many documents in reader r contain term t, via a TermDocs scan.
// NOTE(review): the iteration/accumulation part of the body is elided in this
// view; only the TermDocs acquisition is visible.
50 public static int count(Term t, IndexReader r) throws IOException {
52 TermDocs td = r.termDocs(t);
// Interleaves adds/updates/deletes with NRT reader opens, then verifies
// IndexReader.isCurrent() semantics across optimize, commit, and reopening
// the writer. NOTE(review): several original lines (switch cases, closes,
// braces) are elided in this view.
61 public void testAddCloseOpen() throws IOException {
62 Directory dir1 = newDirectory();
63 IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
65 IndexWriter writer = new IndexWriter(dir1, iwc);
66 for (int i = 0; i < 97 ; i++) {
67 IndexReader reader = writer.getReader();
69 writer.addDocument(createDocument(i, "x", 1 + random.nextInt(5)));
71 int previous = random.nextInt(i);
72 // a check if the reader is current here could fail since there might be
// Randomly pick one of several mutation kinds against a previously added doc.
74 switch (random.nextInt(5)) {
78 writer.addDocument(createDocument(i, "x", 1 + random.nextInt(5)));
81 writer.updateDocument(new Term("id", "" + previous), createDocument(
82 previous, "x", 1 + random.nextInt(5)));
85 writer.deleteDocuments(new Term("id", "" + previous));
// The NRT reader was opened before the mutation above, so it must be stale.
88 assertFalse(reader.isCurrent());
91 writer.optimize(); // make sure all merging is done etc.
92 IndexReader reader = writer.getReader();
93 writer.commit(); // no changes that are not visible to the reader
94 assertTrue(reader.isCurrent());
96 assertTrue(reader.isCurrent()); // all changes are visible to the reader
97 iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
98 writer = new IndexWriter(dir1, iwc);
99 assertTrue(reader.isCurrent());
100 writer.addDocument(createDocument(1, "x", 1+random.nextInt(5)));
101 assertTrue(reader.isCurrent()); // segments in ram but IW is different to the readers one
103 assertFalse(reader.isCurrent()); // segments written
// Updates a document through the writer and verifies that old NRT readers go
// stale while freshly opened NRT and directory readers see the change.
// NOTE(review): commit/close calls between some assertions are elided in this
// view.
108 public void testUpdateDocument() throws Exception {
109 boolean optimize = true;
111 Directory dir1 = newDirectory();
112 IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
113 if (iwc.getMaxBufferedDocs() < 20) {
114 iwc.setMaxBufferedDocs(20);
// Randomize compound-file usage while keeping merging disabled.
117 if (random.nextBoolean()) {
118 iwc.setMergePolicy(NoMergePolicy.NO_COMPOUND_FILES);
120 iwc.setMergePolicy(NoMergePolicy.COMPOUND_FILES);
122 IndexWriter writer = new IndexWriter(dir1, iwc);
125 createIndexNoClose(!optimize, "index1", writer);
127 // writer.flush(false, true, true);
130 IndexReader r1 = writer.getReader();
131 assertTrue(r1.isCurrent());
133 String id10 = r1.document(10).getField("id").stringValue();
// Replace doc 10's id with 8000 via updateDocument; r1 must become stale.
135 Document newDoc = r1.document(10);
136 newDoc.removeField("id");
137 newDoc.add(newField("id", Integer.toString(8000), Store.YES, Index.NOT_ANALYZED));
138 writer.updateDocument(new Term("id", id10), newDoc);
139 assertFalse(r1.isCurrent());
141 IndexReader r2 = writer.getReader();
142 assertTrue(r2.isCurrent());
143 assertEquals(0, count(new Term("id", id10), r2));
144 assertEquals(1, count(new Term("id", Integer.toString(8000)), r2));
148 assertTrue(r2.isCurrent());
150 IndexReader r3 = IndexReader.open(dir1, true);
151 assertTrue(r3.isCurrent());
152 assertTrue(r2.isCurrent());
153 assertEquals(0, count(new Term("id", id10), r3));
154 assertEquals(1, count(new Term("id", Integer.toString(8000)), r3));
// Reopen the writer, add a doc; readers stay current until changes are flushed.
156 writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
157 Document doc = new Document();
158 doc.add(newField("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
159 writer.addDocument(doc);
160 assertTrue(r2.isCurrent());
161 assertTrue(r3.isCurrent());
165 assertFalse(r2.isCurrent());
166 assertTrue(!r3.isCurrent());
// Verifies isCurrent() for NRT readers vs directory readers: NRT readers see
// uncommitted changes, directory readers only see committed state.
174 public void testIsCurrent() throws IOException {
175 Directory dir = newDirectory();
176 IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
178 IndexWriter writer = new IndexWriter(dir, iwc);
179 Document doc = new Document();
180 doc.add(newField("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
181 writer.addDocument(doc);
// Second writer session: open an NRT reader, then mutate the index.
184 iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
185 writer = new IndexWriter(dir, iwc);
186 doc = new Document();
187 doc.add(newField("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
188 IndexReader nrtReader = writer.getReader();
189 assertTrue(nrtReader.isCurrent());
190 writer.addDocument(doc);
191 assertFalse(nrtReader.isCurrent()); // should see the changes
192 writer.optimize(); // make sure we don't have a merge going on
193 assertFalse(nrtReader.isCurrent());
196 IndexReader dirReader = IndexReader.open(dir);
197 nrtReader = writer.getReader();
199 assertTrue(dirReader.isCurrent());
200 assertTrue(nrtReader.isCurrent()); // nothing was committed yet so we are still current
201 assertEquals(2, nrtReader.maxDoc()); // sees the actual document added
202 assertEquals(1, dirReader.maxDoc());
203 writer.close(); // close is actually a commit both should see the changes
204 assertTrue(nrtReader.isCurrent());
205 assertFalse(dirReader.isCurrent()); // this reader has been opened before the writer was closed / committed
213 * Test using IW.addIndexes
// Builds two 100-doc indexes, adds the second into the first via
// IndexWriter.addIndexes, and verifies NRT reader currency, doc counts, and
// that docs from both source indexes are retrievable.
217 public void testAddIndexes() throws Exception {
218 boolean optimize = false;
220 Directory dir1 = newDirectory();
221 IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
222 if (iwc.getMaxBufferedDocs() < 20) {
223 iwc.setMaxBufferedDocs(20);
// Randomize compound-file usage while keeping merging disabled.
226 if (random.nextBoolean()) {
227 iwc.setMergePolicy(NoMergePolicy.NO_COMPOUND_FILES);
229 iwc.setMergePolicy(NoMergePolicy.COMPOUND_FILES);
231 IndexWriter writer = new IndexWriter(dir1, iwc);
233 writer.setInfoStream(infoStream);
235 createIndexNoClose(!optimize, "index1", writer);
236 writer.flush(false, true);
238 // create a 2nd index
239 Directory dir2 = newDirectory();
240 IndexWriter writer2 = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
241 writer2.setInfoStream(infoStream);
242 createIndexNoClose(!optimize, "index2", writer2);
// addIndexes must invalidate a previously opened NRT reader.
245 IndexReader r0 = writer.getReader();
246 assertTrue(r0.isCurrent());
247 writer.addIndexes(new Directory[] { dir2 });
248 assertFalse(r0.isCurrent());
251 IndexReader r1 = writer.getReader();
252 assertTrue(r1.isCurrent());
255 assertTrue(r1.isCurrent()); // we have seen all changes - no change after opening the NRT reader
257 assertEquals(200, r1.maxDoc());
259 int index2df = r1.docFreq(new Term("indexname", "index2"));
261 assertEquals(100, index2df);
263 // verify the docs are from different indexes
264 Document doc5 = r1.document(5);
265 assertEquals("index1", doc5.get("indexname"));
266 Document doc150 = r1.document(150);
267 assertEquals("index2", doc150.get("indexname"));
// Adds the same 100-doc source index five times and checks the NRT reader
// sees all 500 documents.
274 public void testAddIndexes2() throws Exception {
275 boolean optimize = false;
277 Directory dir1 = newDirectory();
278 IndexWriter writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
279 writer.setInfoStream(infoStream);
281 // create a 2nd index
282 Directory dir2 = newDirectory();
283 IndexWriter writer2 = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
284 writer2.setInfoStream(infoStream);
285 createIndexNoClose(!optimize, "index2", writer2);
// Repeatedly import dir2 — duplicates are allowed, so maxDoc accumulates.
288 writer.addIndexes(new Directory[] { dir2 });
289 writer.addIndexes(new Directory[] { dir2 });
290 writer.addIndexes(new Directory[] { dir2 });
291 writer.addIndexes(new Directory[] { dir2 });
292 writer.addIndexes(new Directory[] { dir2 });
294 IndexReader r1 = writer.getReader();
295 assertEquals(500, r1.maxDoc());
304 * Deletes using IW.deleteDocuments
// Deletes docs through the writer (by Term and by Query) and verifies each
// successive NRT reader reflects the deletes while older readers do not;
// finally reopens the writer to confirm deletes reached the directory.
308 public void testDeleteFromIndexWriter() throws Exception {
309 boolean optimize = true;
311 Directory dir1 = newDirectory();
312 IndexWriter writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setReaderTermsIndexDivisor(2));
313 writer.setInfoStream(infoStream);
315 createIndexNoClose(!optimize, "index1", writer);
316 writer.flush(false, true);
318 IndexReader r1 = writer.getReader();
320 String id10 = r1.document(10).getField("id").stringValue();
322 // deleted IW docs should not show up in the next getReader
323 writer.deleteDocuments(new Term("id", id10));
324 IndexReader r2 = writer.getReader();
325 assertEquals(1, count(new Term("id", id10), r1));
326 assertEquals(0, count(new Term("id", id10), r2));
328 String id50 = r1.document(50).getField("id").stringValue();
329 assertEquals(1, count(new Term("id", id50), r1));
331 writer.deleteDocuments(new Term("id", id50));
333 IndexReader r3 = writer.getReader();
334 assertEquals(0, count(new Term("id", id10), r3));
335 assertEquals(0, count(new Term("id", id50), r3));
// Delete-by-query path: visible in r4 but not in the earlier r3.
337 String id75 = r1.document(75).getField("id").stringValue();
338 writer.deleteDocuments(new TermQuery(new Term("id", id75)));
339 IndexReader r4 = writer.getReader();
340 assertEquals(1, count(new Term("id", id75), r3));
341 assertEquals(0, count(new Term("id", id75), r4));
349 // reopen the writer to verify the delete made it to the directory
350 writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
351 writer.setInfoStream(infoStream);
352 IndexReader w2r1 = writer.getReader();
353 assertEquals(0, count(new Term("id", id10), w2r1));
// Stress test: multiple threads concurrently addIndexes into one main writer
// (via the AddDirectoriesThreads helper below), then verifies the final doc
// count both through the writer and through a fresh directory reader.
// NOTE(review): numDirs initialization is elided in this view.
359 public void testAddIndexesAndDoDeletesThreads() throws Throwable {
360 final int numIter = 2;
363 Directory mainDir = newDirectory();
364 IndexWriter mainWriter = new IndexWriter(mainDir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
365 _TestUtil.reduceOpenFiles(mainWriter);
367 mainWriter.setInfoStream(infoStream);
368 AddDirectoriesThreads addDirThreads = new AddDirectoriesThreads(numIter, mainWriter);
369 addDirThreads.launchThreads(numDirs);
370 addDirThreads.joinThreads();
372 //assertEquals(100 + numDirs * (3 * numIter / 4) * addDirThreads.NUM_THREADS
373 // * addDirThreads.NUM_INIT_DOCS, addDirThreads.mainWriter.numDocs());
374 assertEquals(addDirThreads.count.intValue(), addDirThreads.mainWriter.numDocs());
376 addDirThreads.close(true);
378 assertTrue(addDirThreads.failures.size() == 0);
380 _TestUtil.checkIndex(mainDir);
382 IndexReader reader = IndexReader.open(mainDir, true);
383 assertEquals(addDirThreads.count.intValue(), reader.numDocs());
384 //assertEquals(100 + numDirs * (3 * numIter / 4) * addDirThreads.NUM_THREADS
385 // * addDirThreads.NUM_INIT_DOCS, reader.numDocs());
388 addDirThreads.closeDir();
// Helper for testAddIndexesAndDoDeletesThreads: seeds a 100-doc source
// directory, then runs NUM_THREADS worker threads that repeatedly copy it
// and addIndexes the copies into mainWriter, tracking the expected doc count
// and collecting failures. NOTE(review): several lines (field declarations
// for numDirs/addDir, loop bodies, braces) are elided in this view.
392 private class AddDirectoriesThreads {
394 final static int NUM_THREADS = 5;
395 final static int NUM_INIT_DOCS = 100;
397 final Thread[] threads = new Thread[NUM_THREADS];
398 IndexWriter mainWriter;
399 final List<Throwable> failures = new ArrayList<Throwable>();
400 IndexReader[] readers;
401 boolean didClose = false;
// count tracks the expected total number of docs added to mainWriter.
402 AtomicInteger count = new AtomicInteger(0);
403 AtomicInteger numaddIndexes = new AtomicInteger(0);
// Builds the shared source index (addDir) and opens one reader per target dir.
405 public AddDirectoriesThreads(int numDirs, IndexWriter mainWriter) throws Throwable {
406 this.numDirs = numDirs;
407 this.mainWriter = mainWriter;
408 addDir = newDirectory();
409 IndexWriter writer = new IndexWriter(addDir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
410 for (int i = 0; i < NUM_INIT_DOCS; i++) {
411 Document doc = createDocument(i, "addindex", 4);
412 writer.addDocument(doc);
417 readers = new IndexReader[numDirs];
418 for (int i = 0; i < numDirs; i++)
419 readers[i] = IndexReader.open(addDir, false);
// Waits for all worker threads to finish; restores the interrupt contract
// by wrapping InterruptedException in Lucene's ThreadInterruptedException.
423 for (int i = 0; i < NUM_THREADS; i++)
426 } catch (InterruptedException ie) {
427 throw new ThreadInterruptedException(ie);
// Closes mainWriter, optionally waiting for in-flight merges first.
431 void close(boolean doWait) throws Throwable {
434 mainWriter.waitForMerges();
436 mainWriter.close(doWait);
// Releases per-directory readers (and, presumably, directories — elided).
439 void closeDir() throws Throwable {
440 for (int i = 0; i < numDirs; i++)
// Records a worker-thread failure for the main test to assert on.
445 void handle(Throwable t) {
446 t.printStackTrace(System.out);
447 synchronized (failures) {
// Spawns NUM_THREADS workers; each snapshots addDir into fresh RAM copies
// and performs numIter addIndexes-style iterations via doBody.
452 void launchThreads(final int numIter) {
453 for (int i = 0; i < NUM_THREADS; i++) {
454 threads[i] = new Thread() {
458 final Directory[] dirs = new Directory[numDirs];
459 for (int k = 0; k < numDirs; k++)
460 dirs[k] = new MockDirectoryWrapper(random, new RAMDirectory(addDir));
463 // System.out.println(Thread.currentThread().getName() + ": iter
465 for (int x=0; x < numIter; x++) {
466 // only do addIndexes
469 //if (numIter > 0 && j == numIter)
474 } catch (Throwable t) {
480 for (int i = 0; i < NUM_THREADS; i++)
// One iteration of work: add the copied dirs (optionally optimizing, or via
// the readers overload), then bump the expected doc count.
484 void doBody(int j, Directory[] dirs) throws Throwable {
487 mainWriter.addIndexes(dirs);
488 mainWriter.optimize();
491 mainWriter.addIndexes(dirs);
492 numaddIndexes.incrementAndGet();
495 mainWriter.addIndexes(readers);
500 count.addAndGet(dirs.length*NUM_INIT_DOCS);
// Runs the reopen-segment scenario with optimize enabled.
504 public void testIndexWriterReopenSegmentOptimize() throws Exception {
505 doTestIndexWriterReopenSegment(true);
// Runs the reopen-segment scenario without optimize.
508 public void testIndexWriterReopenSegment() throws Exception {
509 doTestIndexWriterReopenSegment(false);
513 * Tests creating a segment, then check to ensure the segment can be seen via
// Creates segments via flush, checks each writer.getReader() call returns a
// reader reflecting the latest state (100 then 200 docs), and confirms the
// changes persist after reopening the writer against the directory.
516 public void doTestIndexWriterReopenSegment(boolean optimize) throws Exception {
517 Directory dir1 = newDirectory();
518 IndexWriter writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
519 writer.setInfoStream(infoStream);
520 IndexReader r1 = writer.getReader();
521 assertEquals(0, r1.maxDoc());
522 createIndexNoClose(false, "index1", writer);
523 writer.flush(!optimize, true);
525 IndexReader iwr1 = writer.getReader();
526 assertEquals(100, iwr1.maxDoc());
528 IndexReader r2 = writer.getReader();
529 assertEquals(r2.maxDoc(), 100);
// Add 100 more docs (ids 10000..10099) and flush a new segment.
531 for (int x = 10000; x < 10000 + 100; x++) {
532 Document d = createDocument(x, "index1", 5);
533 writer.addDocument(d);
535 writer.flush(false, true);
536 // verify the reader was reopened internally
537 IndexReader iwr2 = writer.getReader();
538 assertTrue(iwr2 != r1);
539 assertEquals(200, iwr2.maxDoc());
540 // should have flushed out a segment
541 IndexReader r3 = writer.getReader();
542 assertTrue(r2 != r3);
543 assertEquals(200, r3.maxDoc());
545 // dec ref the readers rather than close them because
546 // closing flushes changes to the writer
554 // test whether the changes made it to the directory
555 writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
556 IndexReader w2r1 = writer.getReader();
557 // insure the deletes were actually flushed to the directory
558 assertEquals(200, w2r1.maxDoc());
// Builds a test Document with an "id" field (n), an "indexname" field, and
// numFields analyzed body fields sharing the same generated text.
// NOTE(review): the lines populating sb before its first use are elided in
// this view.
566 public static Document createDocument(int n, String indexName, int numFields) {
567 StringBuilder sb = new StringBuilder();
568 Document doc = new Document();
569 doc.add(new Field("id", Integer.toString(n), Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
570 doc.add(new Field("indexname", indexName, Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
573 doc.add(new Field("field1", sb.toString(), Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
// Remaining fields are named field2..fieldN (note the i + 1 offset).
576 for (int i = 1; i < numFields; i++) {
577 doc.add(new Field("field" + (i + 1), sb.toString(), Store.YES,
578 Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
584 * Delete a document by term and return the doc id
586 * public static int deleteDocument(Term term, IndexWriter writer) throws
587 * IOException { IndexReader reader = writer.getReader(); TermDocs td =
588 * reader.termDocs(term); int doc = -1; //if (td.next()) { // doc = td.doc();
589 * //} //writer.deleteDocuments(term); td.close(); return doc; }
// Creates and closes a 100-doc index in dir1; when multiSegment is true,
// commits every 10 docs so multiple segments are produced.
// NOTE(review): the commit/close tail of the body is elided in this view.
592 public static void createIndex(Random random, Directory dir1, String indexName,
593 boolean multiSegment) throws IOException {
594 IndexWriter w = new IndexWriter(dir1, LuceneTestCase.newIndexWriterConfig(random,
595 TEST_VERSION_CURRENT, new MockAnalyzer(random))
596 .setMergePolicy(new LogDocMergePolicy()));
597 for (int i = 0; i < 100; i++) {
598 w.addDocument(createDocument(i, indexName, 4));
599 if (multiSegment && (i % 10) == 0) {
// Adds 100 docs to an already-open writer without closing it; used by tests
// that need an NRT writer afterwards.
608 public static void createIndexNoClose(boolean multiSegment, String indexName,
609 IndexWriter w) throws IOException {
610 for (int i = 0; i < 100; i++) {
611 w.addDocument(createDocument(i, indexName, 4));
// IndexReaderWarmer stub used by testMergeWarmer; presumably increments a
// warmCount counter per invocation (body elided in this view — confirm).
618 private static class MyWarmer extends IndexWriter.IndexReaderWarmer {
621 public void warm(IndexReader reader) throws IOException {
// Verifies the merged-segment warmer is invoked when merges run under NRT:
// after many adds with merge factor 2 the warm count must be > 0, and must
// keep growing when further adds trigger more merges.
626 public void testMergeWarmer() throws Exception {
628 Directory dir1 = newDirectory();
630 MyWarmer warmer = new MyWarmer();
631 IndexWriter writer = new IndexWriter(
633 newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
634 setMaxBufferedDocs(2).
635 setMergedSegmentWarmer(warmer).
636 setMergeScheduler(new ConcurrentMergeScheduler()).
637 setMergePolicy(newLogMergePolicy())
639 writer.setInfoStream(infoStream);
642 createIndexNoClose(false, "test", writer);
644 // get a reader to put writer into near real-time mode
645 IndexReader r1 = writer.getReader();
// Aggressive merge factor so merges (and warming) actually happen.
647 ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(2);
649 int num = atLeast(100);
650 for (int i = 0; i < num; i++) {
651 writer.addDocument(createDocument(i, "test", 4));
// Wait for all background merges before asserting on the warm count.
653 ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
655 assertTrue(warmer.warmCount > 0);
656 final int count = warmer.warmCount;
658 writer.addDocument(createDocument(17, "test", 4));
660 assertTrue(warmer.warmCount > count);
// Verifies an NRT reader stays usable and reopenable after a commit and
// subsequent adds: reopen must surface the 10 extra docs (110 total).
// NOTE(review): the commit call and the r1=r2 swap are elided in this view.
667 public void testAfterCommit() throws Exception {
668 Directory dir1 = newDirectory();
669 IndexWriter writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergeScheduler(new ConcurrentMergeScheduler()));
671 writer.setInfoStream(infoStream);
674 createIndexNoClose(false, "test", writer);
676 // get a reader to put writer into near real-time mode
677 IndexReader r1 = writer.getReader();
678 _TestUtil.checkIndex(dir1);
680 _TestUtil.checkIndex(dir1);
681 assertEquals(100, r1.numDocs());
683 for (int i = 0; i < 10; i++) {
684 writer.addDocument(createDocument(i, "test", 4));
686 ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
688 IndexReader r2 = r1.reopen();
693 assertEquals(110, r1.numDocs());
699 // Make sure reader remains usable even if IndexWriter closes
// Opens an NRT reader, closes the writer, and confirms the reader still
// serves docs and searches; reopening (elided) must then throw
// AlreadyClosedException.
700 public void testAfterClose() throws Exception {
701 Directory dir1 = newDirectory();
702 IndexWriter writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
703 writer.setInfoStream(infoStream);
706 createIndexNoClose(false, "test", writer);
708 IndexReader r = writer.getReader();
711 _TestUtil.checkIndex(dir1);
713 // reader should remain usable even after IndexWriter is closed:
714 assertEquals(100, r.numDocs());
715 Query q = new TermQuery(new Term("indexname", "test"));
716 IndexSearcher searcher = newSearcher(r);
717 assertEquals(100, searcher.search(q, 10).totalHits);
// The operation expected to fail is elided in this view (presumably
// r.reopen() after the writer closed).
721 fail("failed to hit AlreadyClosedException");
722 } catch (AlreadyClosedException ace) {
729 // Stress test reopen during addIndexes
// Five daemon threads hammer writer.addIndexes while the main thread
// repeatedly reopens and searches the NRT reader; hit counts must never
// decrease and no thread may record an exception.
730 public void testDuringAddIndexes() throws Exception {
731 MockDirectoryWrapper dir1 = newDirectory();
732 final IndexWriter writer = new IndexWriter(
734 newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).
735 setMergePolicy(newLogMergePolicy(2))
737 writer.setInfoStream(infoStream);
738 ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(2);
741 createIndexNoClose(false, "test", writer);
// Ten RAM snapshots of the base index serve as addIndexes sources.
744 final Directory[] dirs = new Directory[10];
745 for (int i=0;i<10;i++) {
746 dirs[i] = new MockDirectoryWrapper(random, new RAMDirectory(dir1));
749 IndexReader r = writer.getReader();
751 final int NUM_THREAD = 5;
752 final float SECONDS = 0.5f;
754 final long endTime = (long) (System.currentTimeMillis() + 1000.*SECONDS);
755 final List<Throwable> excs = Collections.synchronizedList(new ArrayList<Throwable>());
757 final Thread[] threads = new Thread[NUM_THREAD];
758 for(int i=0;i<NUM_THREAD;i++) {
759 threads[i] = new Thread() {
764 writer.addIndexes(dirs);
766 } catch (Throwable t) {
768 throw new RuntimeException(t);
770 } while(System.currentTimeMillis() < endTime);
773 threads[i].setDaemon(true);
// Main thread: reopen + search until the deadline; totalHits must be
// monotonically non-decreasing since only adds occur.
778 while(System.currentTimeMillis() < endTime) {
779 IndexReader r2 = r.reopen();
784 Query q = new TermQuery(new Term("indexname", "test"));
785 IndexSearcher searcher = newSearcher(r);
786 final int count = searcher.search(q, 10).totalHits;
788 assertTrue(count >= lastCount);
// After joining the workers, do one final reopen + search + check.
792 for(int i=0;i<NUM_THREAD;i++) {
796 IndexReader r2 = r.reopen();
801 Query q = new TermQuery(new Term("indexname", "test"));
802 IndexSearcher searcher = newSearcher(r);
803 final int count = searcher.search(q, 10).totalHits;
805 assertTrue(count >= lastCount);
807 assertEquals(0, excs.size());
// No deleted-but-still-open files may remain once everything is closed.
809 assertEquals(0, dir1.getOpenDeletedFiles().size());
816 // Stress test reopen during add/delete
// Five daemon threads interleave addDocument and deleteDocuments while the
// main thread reopens and searches the NRT reader; requires zero worker
// exceptions and at least one search hit overall.
817 public void testDuringAddDelete() throws Exception {
818 Directory dir1 = newDirectory();
819 final IndexWriter writer = new IndexWriter(
821 newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
822 setMergePolicy(newLogMergePolicy(2))
824 writer.setInfoStream(infoStream);
825 ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(2);
828 createIndexNoClose(false, "test", writer);
831 IndexReader r = writer.getReader();
833 final int NUM_THREAD = 5;
834 final float SECONDS = 0.5f;
836 final long endTime = (long) (System.currentTimeMillis() + 1000.*SECONDS);
837 final List<Throwable> excs = Collections.synchronizedList(new ArrayList<Throwable>());
839 final Thread[] threads = new Thread[NUM_THREAD];
840 for(int i=0;i<NUM_THREAD;i++) {
841 threads[i] = new Thread() {
// Per-thread RNG seeded from the shared test random for reproducibility.
842 final Random r = new Random(random.nextLong());
849 for(int docUpto=0;docUpto<10;docUpto++) {
850 writer.addDocument(createDocument(10*count+docUpto, "test", 4));
853 final int limit = count*10;
854 for(int delUpto=0;delUpto<5;delUpto++) {
855 int x = r.nextInt(limit);
856 writer.deleteDocuments(new Term("field3", "b"+x));
858 } catch (Throwable t) {
860 throw new RuntimeException(t);
862 } while(System.currentTimeMillis() < endTime);
865 threads[i].setDaemon(true);
// Main thread: reopen + search until the deadline, accumulating hits.
870 while(System.currentTimeMillis() < endTime) {
871 IndexReader r2 = r.reopen();
876 Query q = new TermQuery(new Term("indexname", "test"));
877 IndexSearcher searcher = newSearcher(r);
878 sum += searcher.search(q, 10).totalHits;
882 for(int i=0;i<NUM_THREAD;i++) {
885 // at least search once
886 IndexReader r2 = r.reopen();
891 Query q = new TermQuery(new Term("indexname", "test"));
892 IndexSearcher searcher = newSearcher(r);
893 sum += searcher.search(q, 10).totalHits;
895 assertTrue("no documents found at all", sum > 0);
897 assertEquals(0, excs.size());
// After deleting one of the docs and expunging deletes (the expunge call is
// elided in this view), a fresh directory reader must show one live doc and
// no deletions.
904 public void testExpungeDeletes() throws Throwable {
905 Directory dir = newDirectory();
906 final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
907 Document doc = new Document();
908 doc.add(newField("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
909 Field id = newField("id", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
915 w.deleteDocuments(new Term("id", "0"));
917 IndexReader r = w.getReader();
921 r = IndexReader.open(dir, true);
922 assertEquals(1, r.numDocs());
923 assertFalse(r.hasDeletions());
// Verifies numDocs() on successive NRT readers drops from 2 to 1 to 0 as
// docs are deleted by id. NOTE(review): the addDocument calls and the
// getReader refreshes between assertions are elided in this view.
928 public void testDeletesNumDocs() throws Throwable {
929 Directory dir = newDirectory();
930 final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
931 Document doc = new Document();
932 doc.add(newField("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
933 Field id = newField("id", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
939 IndexReader r = w.getReader();
940 assertEquals(2, r.numDocs());
943 w.deleteDocuments(new Term("id", "0"));
945 assertEquals(1, r.numDocs());
948 w.deleteDocuments(new Term("id", "1"));
950 assertEquals(0, r.numDocs());
957 public void testEmptyIndex() throws Exception {
958 // Ensures that getReader works on an empty index, which hasn't been committed yet.
959 Directory dir = newDirectory();
960 IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
961 IndexReader r = w.getReader();
962 assertEquals(0, r.numDocs());
// Verifies a custom merged-segment warmer runs: after adding 20 identical
// docs with maxBufferedDocs=2 and merge factor 10, the warmer must have been
// invoked (didWarm set; presumably inside the elided warm() tail — confirm).
968 public void testSegmentWarmer() throws Exception {
969 Directory dir = newDirectory();
970 final AtomicBoolean didWarm = new AtomicBoolean();
971 IndexWriter w = new IndexWriter(
973 newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).
974 setMaxBufferedDocs(2).
975 setReaderPooling(true).
976 setMergedSegmentWarmer(new IndexWriter.IndexReaderWarmer() {
978 public void warm(IndexReader r) throws IOException {
// The warmed (merged) segment must contain all 20 "foo:bar" docs.
979 IndexSearcher s = newSearcher(r);
980 TopDocs hits = s.search(new TermQuery(new Term("foo", "bar")), 10);
981 assertEquals(20, hits.totalHits);
986 setMergePolicy(newLogMergePolicy(10))
989 Document doc = new Document();
990 doc.add(newField("foo", "bar", Field.Store.YES, Field.Index.NOT_ANALYZED));
991 for(int i=0;i<20;i++) {
997 assertTrue(didWarm.get());
1000 public void testNoTermsIndex() throws Exception {
1001 Directory dir = newDirectory();
1002 IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
1003 TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT))
1004 .setReaderTermsIndexDivisor(-1));
1005 Document doc = new Document();
1006 doc.add(new Field("f", "val", Store.NO, Index.ANALYZED));
1008 IndexReader r = IndexReader.open(w, true);
1010 r.termDocs(new Term("f", "val"));
1011 fail("should have failed to seek since terms index was not loaded");
1012 } catch (IllegalStateException e) {
1013 // expected - we didn't load the term index