1 package org.apache.lucene.index;
4 * Copyright 2004 The Apache Software Foundation
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
10 * http://www.apache.org/licenses/LICENSE-2.0
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
19 import java.io.IOException;
20 import java.io.PrintStream;
21 import java.util.ArrayList;
22 import java.util.Collections;
23 import java.util.List;
24 import java.util.Random;
25 import java.util.concurrent.atomic.AtomicBoolean;
27 import org.apache.lucene.analysis.MockAnalyzer;
28 import org.apache.lucene.analysis.WhitespaceAnalyzer;
29 import org.apache.lucene.document.Document;
30 import org.apache.lucene.document.Field;
31 import org.apache.lucene.document.Field.Index;
32 import org.apache.lucene.document.Field.Store;
33 import org.apache.lucene.document.Field.TermVector;
34 import org.apache.lucene.search.TermQuery;
35 import org.apache.lucene.search.IndexSearcher;
36 import org.apache.lucene.search.Query;
37 import org.apache.lucene.search.TopDocs;
38 import org.apache.lucene.store.Directory;
39 import org.apache.lucene.store.MockDirectoryWrapper;
40 import org.apache.lucene.store.AlreadyClosedException;
41 import org.apache.lucene.store.RAMDirectory;
42 import org.apache.lucene.util.LuceneTestCase;
43 import org.apache.lucene.util._TestUtil;
44 import org.apache.lucene.util.ThreadInterruptedException;
45 import java.util.concurrent.atomic.AtomicInteger;
47 public class TestIndexWriterReader extends LuceneTestCase {
// Debug stream handed to each IndexWriter in this test; active only when VERBOSE is true.
48 static PrintStream infoStream = VERBOSE ? System.out : null;
// Returns the number of documents in reader r containing term t.
// NOTE(review): this chunk is truncated — the iteration over td and the
// return statement are elided here; only the TermDocs acquisition is visible.
50 public static int count(Term t, IndexReader r) throws IOException {
52 TermDocs td = r.termDocs(t);
// Exercises NRT readers across add/update/delete operations and across
// closing and re-opening the IndexWriter, checking isCurrent() transitions.
// NOTE(review): interior lines are elided in this chunk (case labels of the
// switch, loop closers, writer.close()/commit() calls are not visible).
61 public void testAddCloseOpen() throws IOException {
62 Directory dir1 = newDirectory();
63 IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
65 IndexWriter writer = new IndexWriter(dir1, iwc);
66 for (int i = 0; i < 97 ; i++) {
67 IndexReader reader = writer.getReader();
69 writer.addDocument(DocHelper.createDocument(i, "x", 1 + random.nextInt(5)));
// NOTE(review): random.nextInt(i) throws IllegalArgumentException when i == 0;
// a guard (e.g. "if (i > 0)") is presumably in the elided lines — confirm.
71 int previous = random.nextInt(i);
72 // a check if the reader is current here could fail since there might be
74 switch (random.nextInt(5)) {
78 writer.addDocument(DocHelper.createDocument(i, "x", 1 + random.nextInt(5)));
81 writer.updateDocument(new Term("id", "" + previous), DocHelper.createDocument(
82 previous, "x", 1 + random.nextInt(5)));
85 writer.deleteDocuments(new Term("id", "" + previous));
// The NRT reader was opened before the mutation above, so it must be stale.
88 assertFalse(reader.isCurrent());
91 writer.optimize(); // make sure all merging is done etc.
92 IndexReader reader = writer.getReader();
93 writer.commit(); // no changes that are not visible to the reader
94 assertTrue(reader.isCurrent());
96 assertTrue(reader.isCurrent()); // all changes are visible to the reader
// Re-open a new writer on the same directory; the old NRT reader stays
// current until the new writer actually writes segments.
97 iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
98 writer = new IndexWriter(dir1, iwc);
99 assertTrue(reader.isCurrent());
100 writer.addDocument(DocHelper.createDocument(1, "x", 1+random.nextInt(5)));
101 assertTrue(reader.isCurrent()); // segments in ram but IW is different to the readers one
103 assertFalse(reader.isCurrent()); // segments written
// Verifies that updateDocument makes an NRT reader stale, that a fresh NRT
// reader sees the update, and that a directory reader opened after commit
// agrees. NOTE(review): several lines (close() calls, an apparent commit
// before line 148, loop/brace closers) are elided in this chunk.
108 public void testUpdateDocument() throws Exception {
109 boolean optimize = true;
111 Directory dir1 = newDirectory();
112 IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
113 if (iwc.getMaxBufferedDocs() < 20) {
114 iwc.setMaxBufferedDocs(20);
// Randomly pick compound vs. non-compound files with merging disabled.
117 if (random.nextBoolean()) {
118 iwc.setMergePolicy(NoMergePolicy.NO_COMPOUND_FILES);
120 iwc.setMergePolicy(NoMergePolicy.COMPOUND_FILES);
122 IndexWriter writer = new IndexWriter(dir1, iwc);
125 createIndexNoClose(!optimize, "index1", writer);
127 // writer.flush(false, true, true);
130 IndexReader r1 = writer.getReader();
131 assertTrue(r1.isCurrent());
133 String id10 = r1.document(10).getField("id").stringValue();
// Replace doc 10's id with 8000 via updateDocument; r1 must become stale.
135 Document newDoc = r1.document(10);
136 newDoc.removeField("id");
137 newDoc.add(newField("id", Integer.toString(8000), Store.YES, Index.NOT_ANALYZED));
138 writer.updateDocument(new Term("id", id10), newDoc);
139 assertFalse(r1.isCurrent());
141 IndexReader r2 = writer.getReader();
142 assertTrue(r2.isCurrent());
143 assertEquals(0, count(new Term("id", id10), r2));
144 assertEquals(1, count(new Term("id", Integer.toString(8000)), r2));
148 assertTrue(r2.isCurrent());
150 IndexReader r3 = IndexReader.open(dir1, true);
151 assertTrue(r3.isCurrent());
152 assertTrue(r2.isCurrent());
153 assertEquals(0, count(new Term("id", id10), r3));
154 assertEquals(1, count(new Term("id", Integer.toString(8000)), r3));
156 writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
157 Document doc = new Document();
158 doc.add(newField("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
159 writer.addDocument(doc);
160 assertTrue(r2.isCurrent());
161 assertTrue(r3.isCurrent());
165 assertFalse(r2.isCurrent());
// NOTE(review): prefer assertFalse(r3.isCurrent()) over assertTrue(!...) for
// consistency with the preceding assertion and a clearer failure message.
166 assertTrue(!r3.isCurrent());
// Checks isCurrent() semantics for NRT readers vs. directory readers:
// uncommitted changes make an NRT reader stale but a commit (via close)
// re-validates it, while a pre-commit directory reader becomes stale.
// NOTE(review): writer.close() calls between sections are elided here.
174 public void testIsCurrent() throws IOException {
175 Directory dir = newDirectory();
176 IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
178 IndexWriter writer = new IndexWriter(dir, iwc);
179 Document doc = new Document();
180 doc.add(newField("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
181 writer.addDocument(doc);
184 iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
185 writer = new IndexWriter(dir, iwc);
186 doc = new Document();
187 doc.add(newField("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
188 IndexReader nrtReader = writer.getReader();
189 assertTrue(nrtReader.isCurrent());
190 writer.addDocument(doc);
191 assertFalse(nrtReader.isCurrent()); // should see the changes
192 writer.optimize(); // make sure we don't have a merge going on
193 assertFalse(nrtReader.isCurrent());
196 IndexReader dirReader = IndexReader.open(dir);
197 nrtReader = writer.getReader();
199 assertTrue(dirReader.isCurrent());
200 assertTrue(nrtReader.isCurrent()); // nothing was committed yet so we are still current
201 assertEquals(2, nrtReader.maxDoc()); // sees the actual document added
202 assertEquals(1, dirReader.maxDoc());
203 writer.close(); // close is actually a commit both should see the changes
204 assertTrue(nrtReader.isCurrent());
205 assertFalse(dirReader.isCurrent()); // this reader has been opened before the writer was closed / committed
213 * Test using IW.addIndexes
// Builds two 100-doc indexes, adds the second into the first via
// addIndexes(Directory[]), and verifies an NRT reader sees all 200 docs
// with the correct per-index "indexname" field values.
// NOTE(review): resource-close lines are elided in this chunk.
217 public void testAddIndexes() throws Exception {
218 boolean optimize = false;
220 Directory dir1 = newDirectory();
221 IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
222 if (iwc.getMaxBufferedDocs() < 20) {
223 iwc.setMaxBufferedDocs(20);
226 if (random.nextBoolean()) {
227 iwc.setMergePolicy(NoMergePolicy.NO_COMPOUND_FILES);
229 iwc.setMergePolicy(NoMergePolicy.COMPOUND_FILES);
231 IndexWriter writer = new IndexWriter(dir1, iwc);
233 writer.setInfoStream(infoStream);
235 createIndexNoClose(!optimize, "index1", writer);
236 writer.flush(false, true);
238 // create a 2nd index
239 Directory dir2 = newDirectory();
240 IndexWriter writer2 = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
241 writer2.setInfoStream(infoStream);
242 createIndexNoClose(!optimize, "index2", writer2);
// addIndexes must invalidate a previously opened NRT reader...
245 IndexReader r0 = writer.getReader();
246 assertTrue(r0.isCurrent());
247 writer.addIndexes(new Directory[] { dir2 });
248 assertFalse(r0.isCurrent());
// ...while a freshly opened NRT reader is current and sees everything.
251 IndexReader r1 = writer.getReader();
252 assertTrue(r1.isCurrent());
255 assertTrue(r1.isCurrent()); // we have seen all changes - no change after opening the NRT reader
257 assertEquals(200, r1.maxDoc());
259 int index2df = r1.docFreq(new Term("indexname", "index2"));
261 assertEquals(100, index2df);
263 // verify the docs are from different indexes
264 Document doc5 = r1.document(5);
265 assertEquals("index1", doc5.get("indexname"));
266 Document doc150 = r1.document(150);
267 assertEquals("index2", doc150.get("indexname"));
// Adds the same external 100-doc directory five times and verifies the NRT
// reader sees 5 * 100 = 500 documents.
// NOTE(review): close() calls for writers/readers/dirs are elided here.
274 public void testAddIndexes2() throws Exception {
275 boolean optimize = false;
277 Directory dir1 = newDirectory();
278 IndexWriter writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
279 writer.setInfoStream(infoStream);
281 // create a 2nd index
282 Directory dir2 = newDirectory();
283 IndexWriter writer2 = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
284 writer2.setInfoStream(infoStream);
285 createIndexNoClose(!optimize, "index2", writer2);
288 writer.addIndexes(new Directory[] { dir2 });
289 writer.addIndexes(new Directory[] { dir2 });
290 writer.addIndexes(new Directory[] { dir2 });
291 writer.addIndexes(new Directory[] { dir2 });
292 writer.addIndexes(new Directory[] { dir2 });
294 IndexReader r1 = writer.getReader();
295 assertEquals(500, r1.maxDoc());
304 * Deletes using IW.deleteDocuments
// Verifies IW.deleteDocuments (by Term and by Query) is reflected in NRT
// readers opened after each delete, while earlier readers keep the old view,
// and that deletes survive a writer close/reopen cycle.
// NOTE(review): close() calls and some assertions are elided in this chunk.
308 public void testDeleteFromIndexWriter() throws Exception {
309 boolean optimize = true;
311 Directory dir1 = newDirectory();
312 IndexWriter writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setReaderTermsIndexDivisor(2));
313 writer.setInfoStream(infoStream);
315 createIndexNoClose(!optimize, "index1", writer);
316 writer.flush(false, true);
318 IndexReader r1 = writer.getReader();
320 String id10 = r1.document(10).getField("id").stringValue();
322 // deleted IW docs should not show up in the next getReader
323 writer.deleteDocuments(new Term("id", id10));
324 IndexReader r2 = writer.getReader();
// r1 (opened before the delete) still sees the doc; r2 does not.
325 assertEquals(1, count(new Term("id", id10), r1));
326 assertEquals(0, count(new Term("id", id10), r2));
328 String id50 = r1.document(50).getField("id").stringValue();
329 assertEquals(1, count(new Term("id", id50), r1));
331 writer.deleteDocuments(new Term("id", id50));
333 IndexReader r3 = writer.getReader();
334 assertEquals(0, count(new Term("id", id10), r3));
335 assertEquals(0, count(new Term("id", id50), r3));
// Delete-by-query path: only readers opened afterwards see the deletion.
337 String id75 = r1.document(75).getField("id").stringValue();
338 writer.deleteDocuments(new TermQuery(new Term("id", id75)));
339 IndexReader r4 = writer.getReader();
340 assertEquals(1, count(new Term("id", id75), r3));
341 assertEquals(0, count(new Term("id", id75), r4));
349 // reopen the writer to verify the delete made it to the directory
350 writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
351 writer.setInfoStream(infoStream);
352 IndexReader w2r1 = writer.getReader();
353 assertEquals(0, count(new Term("id", id10), w2r1));
// Multi-threaded addIndexes stress test: launches AddDirectoriesThreads
// workers against a shared main writer, then checks the expected doc count
// both via the writer and via a fresh directory reader, and that no worker
// recorded a failure. NOTE(review): numDirs initialization and close lines
// are elided in this chunk.
359 public void testAddIndexesAndDoDeletesThreads() throws Throwable {
360 final int numIter = 2;
363 Directory mainDir = newDirectory();
364 IndexWriter mainWriter = new IndexWriter(mainDir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
365 _TestUtil.reduceOpenFiles(mainWriter);
367 mainWriter.setInfoStream(infoStream);
368 AddDirectoriesThreads addDirThreads = new AddDirectoriesThreads(numIter, mainWriter);
369 addDirThreads.launchThreads(numDirs);
370 addDirThreads.joinThreads();
372 //assertEquals(100 + numDirs * (3 * numIter / 4) * addDirThreads.NUM_THREADS
373 // * addDirThreads.NUM_INIT_DOCS, addDirThreads.mainWriter.numDocs());
// The workers track exactly how many docs they added in 'count'.
374 assertEquals(addDirThreads.count.intValue(), addDirThreads.mainWriter.numDocs());
376 addDirThreads.close(true);
378 assertTrue(addDirThreads.failures.size() == 0);
380 _TestUtil.checkIndex(mainDir);
382 IndexReader reader = IndexReader.open(mainDir, true);
383 assertEquals(addDirThreads.count.intValue(), reader.numDocs());
384 //assertEquals(100 + numDirs * (3 * numIter / 4) * addDirThreads.NUM_THREADS
385 // * addDirThreads.NUM_INIT_DOCS, reader.numDocs());
388 addDirThreads.closeDir();
// Helper for testAddIndexesAndDoDeletesThreads: NUM_THREADS workers each
// repeatedly call addIndexes (via Directory copies or pooled readers) on a
// shared main writer, counting added docs and collecting failures.
// NOTE(review): this chunk is truncated — the 'numDirs'/'addDir' field
// declarations, joinThreads(), the run() skeleton, and the switch that
// dispatches on j in doBody are all elided.
392 private class AddDirectoriesThreads {
394 final static int NUM_THREADS = 5;
395 final static int NUM_INIT_DOCS = 100;
397 final Thread[] threads = new Thread[NUM_THREADS];
398 IndexWriter mainWriter;
399 final List<Throwable> failures = new ArrayList<Throwable>();
400 IndexReader[] readers;
401 boolean didClose = false;
// Total docs successfully added; compared against mainWriter.numDocs().
402 AtomicInteger count = new AtomicInteger(0);
403 AtomicInteger numaddIndexes = new AtomicInteger(0);
405 public AddDirectoriesThreads(int numDirs, IndexWriter mainWriter) throws Throwable {
406 this.numDirs = numDirs;
407 this.mainWriter = mainWriter;
// Seed a source directory with NUM_INIT_DOCS docs that the workers copy.
408 addDir = newDirectory();
409 IndexWriter writer = new IndexWriter(addDir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
410 for (int i = 0; i < NUM_INIT_DOCS; i++) {
411 Document doc = DocHelper.createDocument(i, "addindex", 4);
412 writer.addDocument(doc);
417 readers = new IndexReader[numDirs];
418 for (int i = 0; i < numDirs; i++)
419 readers[i] = IndexReader.open(addDir, false);
423 for (int i = 0; i < NUM_THREADS; i++)
// Convert interruption into Lucene's unchecked wrapper, preserving cause.
426 } catch (InterruptedException ie) {
427 throw new ThreadInterruptedException(ie);
431 void close(boolean doWait) throws Throwable {
434 mainWriter.waitForMerges();
436 mainWriter.close(doWait);
439 void closeDir() throws Throwable {
440 for (int i = 0; i < numDirs; i++)
// Record a worker failure for later assertion by the test.
445 void handle(Throwable t) {
446 t.printStackTrace(System.out);
447 synchronized (failures) {
452 void launchThreads(final int numIter) {
453 for (int i = 0; i < NUM_THREADS; i++) {
454 threads[i] = new Thread() {
// Each worker gets its own MockDirectoryWrapper copies of addDir.
458 final Directory[] dirs = new Directory[numDirs];
459 for (int k = 0; k < numDirs; k++)
460 dirs[k] = new MockDirectoryWrapper(random, new RAMDirectory(addDir));
463 // System.out.println(Thread.currentThread().getName() + ": iter
465 for (int x=0; x < numIter; x++) {
466 // only do addIndexes
469 //if (numIter > 0 && j == numIter)
474 } catch (Throwable t) {
480 for (int i = 0; i < NUM_THREADS; i++)
// Performs one addIndexes variant selected by j (dispatch elided from view).
484 void doBody(int j, Directory[] dirs) throws Throwable {
487 mainWriter.addIndexes(dirs);
488 mainWriter.optimize();
491 mainWriter.addIndexes(dirs);
492 numaddIndexes.incrementAndGet();
495 mainWriter.addIndexes(readers);
500 count.addAndGet(dirs.length*NUM_INIT_DOCS);
// Runs the reopen-segment scenario with optimize enabled.
504 public void testIndexWriterReopenSegmentOptimize() throws Exception {
505 doTestIndexWriterReopenSegment(true);
// Runs the reopen-segment scenario without optimize.
508 public void testIndexWriterReopenSegment() throws Exception {
509 doTestIndexWriterReopenSegment(false);
513 * Tests creating a segment, then check to insure the segment can be seen via
// Creates segments, flushes, and verifies that successive getReader() calls
// return reopened instances reflecting the growing maxDoc (100, then 200),
// and that the changes persist after closing and reopening the writer.
// NOTE(review): decRef()/close() lines are elided in this chunk.
516 public void doTestIndexWriterReopenSegment(boolean optimize) throws Exception {
517 Directory dir1 = newDirectory();
518 IndexWriter writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
519 writer.setInfoStream(infoStream);
520 IndexReader r1 = writer.getReader();
521 assertEquals(0, r1.maxDoc());
522 createIndexNoClose(false, "index1", writer);
523 writer.flush(!optimize, true);
525 IndexReader iwr1 = writer.getReader();
526 assertEquals(100, iwr1.maxDoc());
528 IndexReader r2 = writer.getReader();
529 assertEquals(r2.maxDoc(), 100);
// Add another 100 docs with distinct ids (10000..10099).
531 for (int x = 10000; x < 10000 + 100; x++) {
532 Document d = DocHelper.createDocument(x, "index1", 5);
533 writer.addDocument(d);
535 writer.flush(false, true);
536 // verify the reader was reopened internally
537 IndexReader iwr2 = writer.getReader();
538 assertTrue(iwr2 != r1);
539 assertEquals(200, iwr2.maxDoc());
540 // should have flushed out a segment
541 IndexReader r3 = writer.getReader();
542 assertTrue(r2 != r3);
543 assertEquals(200, r3.maxDoc());
545 // dec ref the readers rather than close them because
546 // closing flushes changes to the writer
554 // test whether the changes made it to the directory
555 writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
556 IndexReader w2r1 = writer.getReader();
557 // insure the deletes were actually flushed to the directory
558 assertEquals(200, w2r1.maxDoc());
566 * Delete a document by term and return the doc id
568 * public static int deleteDocument(Term term, IndexWriter writer) throws
569 * IOException { IndexReader reader = writer.getReader(); TermDocs td =
570 * reader.termDocs(term); int doc = -1; //if (td.next()) { // doc = td.doc();
571 * //} //writer.deleteDocuments(term); td.close(); return doc; }
// Builds a 100-doc index in dir1 with the given indexName field value;
// when multiSegment is true, commits every 10 docs to force multiple
// segments. NOTE(review): the commit call inside the if and the closing
// of w are elided in this chunk.
574 public static void createIndex(Random random, Directory dir1, String indexName,
575 boolean multiSegment) throws IOException {
576 IndexWriter w = new IndexWriter(dir1, LuceneTestCase.newIndexWriterConfig(random,
577 TEST_VERSION_CURRENT, new MockAnalyzer(random))
578 .setMergePolicy(new LogDocMergePolicy()));
579 for (int i = 0; i < 100; i++) {
580 w.addDocument(DocHelper.createDocument(i, indexName, 4));
581 if (multiSegment && (i % 10) == 0) {
// Adds 100 docs to an already-open writer without closing it.
// NOTE(review): the use of multiSegment (presumably an optimize/commit
// branch) is elided in this chunk.
590 public static void createIndexNoClose(boolean multiSegment, String indexName,
591 IndexWriter w) throws IOException {
592 for (int i = 0; i < 100; i++) {
593 w.addDocument(DocHelper.createDocument(i, indexName, 4));
// Merge-segment warmer that records invocations; testMergeWarmer asserts
// its warm count. NOTE(review): the warmCount field and the warm() body
// are elided in this chunk.
600 private static class MyWarmer extends IndexWriter.IndexReaderWarmer {
603 public void warm(IndexReader reader) throws IOException {
// Verifies the merged-segment warmer fires during NRT merging: with merge
// factor 2 and many small segments, warmCount must grow as merges run.
// NOTE(review): an optimize/waitForMerges and close calls are elided here.
608 public void testMergeWarmer() throws Exception {
610 Directory dir1 = newDirectory();
612 MyWarmer warmer = new MyWarmer();
613 IndexWriter writer = new IndexWriter(
615 newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
616 setMaxBufferedDocs(2).
617 setMergedSegmentWarmer(warmer).
618 setMergeScheduler(new ConcurrentMergeScheduler()).
619 setMergePolicy(newLogMergePolicy())
621 writer.setInfoStream(infoStream);
624 createIndexNoClose(false, "test", writer);
626 // get a reader to put writer into near real-time mode
627 IndexReader r1 = writer.getReader();
629 ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(2);
631 int num = atLeast(100);
632 for (int i = 0; i < num; i++) {
633 writer.addDocument(DocHelper.createDocument(i, "test", 4));
// Wait for background merges so the warmer has definitely been invoked.
635 ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
637 assertTrue(warmer.warmCount > 0);
638 final int count = warmer.warmCount;
640 writer.addDocument(DocHelper.createDocument(17, "test", 4));
642 assertTrue(warmer.warmCount > count);
// Verifies an NRT reader remains usable after a commit and that reopen()
// picks up subsequently added docs (100 -> 110).
// NOTE(review): the commit, reader swap (r1 = r2), and close calls are
// elided in this chunk.
649 public void testAfterCommit() throws Exception {
650 Directory dir1 = newDirectory();
651 IndexWriter writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergeScheduler(new ConcurrentMergeScheduler()));
653 writer.setInfoStream(infoStream);
656 createIndexNoClose(false, "test", writer);
658 // get a reader to put writer into near real-time mode
659 IndexReader r1 = writer.getReader();
660 _TestUtil.checkIndex(dir1);
662 _TestUtil.checkIndex(dir1);
663 assertEquals(100, r1.numDocs());
665 for (int i = 0; i < 10; i++) {
666 writer.addDocument(DocHelper.createDocument(i, "test", 4));
668 ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
670 IndexReader r2 = r1.reopen();
675 assertEquals(110, r1.numDocs());
681 // Make sure reader remains usable even if IndexWriter closes
// After writer.close() the NRT reader must still search, but reopen()
// (in the elided try block) must throw AlreadyClosedException.
682 public void testAfterClose() throws Exception {
683 Directory dir1 = newDirectory();
684 IndexWriter writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
685 writer.setInfoStream(infoStream);
688 createIndexNoClose(false, "test", writer);
690 IndexReader r = writer.getReader();
// NOTE(review): writer.close() presumably happens in the elided lines
// between 690 and 693 — confirm against the full source.
693 _TestUtil.checkIndex(dir1);
695 // reader should remain usable even after IndexWriter is closed:
696 assertEquals(100, r.numDocs());
697 Query q = new TermQuery(new Term("indexname", "test"));
698 IndexSearcher searcher = newSearcher(r);
699 assertEquals(100, searcher.search(q, 10).totalHits);
703 fail("failed to hit AlreadyClosedException");
704 } catch (AlreadyClosedException ace) {
711 // Stress test reopen during addIndexes
// Concurrently runs addIndexes from NUM_THREAD threads while the main
// thread repeatedly reopens/searches; hit counts must be monotonically
// non-decreasing and no thread may record an exception.
// NOTE(review): thread start/join, reader swaps (r = r2), lastCount
// updates, and close calls are elided in this chunk.
712 public void testDuringAddIndexes() throws Exception {
713 MockDirectoryWrapper dir1 = newDirectory();
714 final IndexWriter writer = new IndexWriter(
716 newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).
717 setMergePolicy(newLogMergePolicy(2))
719 writer.setInfoStream(infoStream);
720 ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(2);
723 createIndexNoClose(false, "test", writer);
726 final Directory[] dirs = new Directory[10];
727 for (int i=0;i<10;i++) {
728 dirs[i] = new MockDirectoryWrapper(random, new RAMDirectory(dir1));
731 IndexReader r = writer.getReader();
733 final int NUM_THREAD = 5;
734 final float SECONDS = 0.5f;
736 final long endTime = (long) (System.currentTimeMillis() + 1000.*SECONDS);
737 final List<Throwable> excs = Collections.synchronizedList(new ArrayList<Throwable>());
739 final Thread[] threads = new Thread[NUM_THREAD];
740 for(int i=0;i<NUM_THREAD;i++) {
741 threads[i] = new Thread() {
746 writer.addIndexes(dirs);
748 } catch (Throwable t) {
750 throw new RuntimeException(t);
752 } while(System.currentTimeMillis() < endTime);
755 threads[i].setDaemon(true);
// Main thread: keep reopening and searching until the deadline passes.
760 while(System.currentTimeMillis() < endTime) {
761 IndexReader r2 = r.reopen();
766 Query q = new TermQuery(new Term("indexname", "test"));
767 IndexSearcher searcher = newSearcher(r);
768 final int count = searcher.search(q, 10).totalHits;
770 assertTrue(count >= lastCount);
774 for(int i=0;i<NUM_THREAD;i++) {
// One final reopen/search after all threads are done.
778 IndexReader r2 = r.reopen();
783 Query q = new TermQuery(new Term("indexname", "test"));
784 IndexSearcher searcher = newSearcher(r);
785 final int count = searcher.search(q, 10).totalHits;
787 assertTrue(count >= lastCount);
789 assertEquals(0, excs.size());
791 assertEquals(0, dir1.getOpenDeletedFiles().size());
798 // Stress test reopen during add/delete
// Concurrently adds and deletes docs from NUM_THREAD threads while the
// main thread reopens and searches; asserts at least one hit overall and
// no worker exceptions. NOTE(review): thread start/join, the per-thread
// count variable, reader swaps, and close calls are elided in this chunk.
799 public void testDuringAddDelete() throws Exception {
800 Directory dir1 = newDirectory();
801 final IndexWriter writer = new IndexWriter(
803 newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
804 setMergePolicy(newLogMergePolicy(2))
806 writer.setInfoStream(infoStream);
807 ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(2);
810 createIndexNoClose(false, "test", writer);
813 IndexReader r = writer.getReader();
815 final int NUM_THREAD = 5;
816 final float SECONDS = 0.5f;
818 final long endTime = (long) (System.currentTimeMillis() + 1000.*SECONDS);
819 final List<Throwable> excs = Collections.synchronizedList(new ArrayList<Throwable>());
821 final Thread[] threads = new Thread[NUM_THREAD];
822 for(int i=0;i<NUM_THREAD;i++) {
823 threads[i] = new Thread() {
// Per-thread RNG seeded from the shared test random for reproducibility.
824 final Random r = new Random(random.nextLong());
831 for(int docUpto=0;docUpto<10;docUpto++) {
832 writer.addDocument(DocHelper.createDocument(10*count+docUpto, "test", 4));
835 final int limit = count*10;
836 for(int delUpto=0;delUpto<5;delUpto++) {
837 int x = r.nextInt(limit);
838 writer.deleteDocuments(new Term("field3", "b"+x));
840 } catch (Throwable t) {
842 throw new RuntimeException(t);
844 } while(System.currentTimeMillis() < endTime);
847 threads[i].setDaemon(true);
// Main thread: reopen and search repeatedly, accumulating total hits.
852 while(System.currentTimeMillis() < endTime) {
853 IndexReader r2 = r.reopen();
858 Query q = new TermQuery(new Term("indexname", "test"));
859 IndexSearcher searcher = newSearcher(r);
860 sum += searcher.search(q, 10).totalHits;
864 for(int i=0;i<NUM_THREAD;i++) {
867 // at least search once
868 IndexReader r2 = r.reopen();
873 Query q = new TermQuery(new Term("indexname", "test"));
874 IndexSearcher searcher = newSearcher(r);
875 sum += searcher.search(q, 10).totalHits;
877 assertTrue("no documents found at all", sum > 0);
879 assertEquals(0, excs.size());
// After deleting one of the indexed docs, an expungeDeletes (in the elided
// lines) followed by commit should leave a directory reader with 1 doc and
// no deletions.
886 public void testExpungeDeletes() throws Throwable {
887 Directory dir = newDirectory();
888 final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
889 Document doc = new Document();
890 doc.add(newField("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
891 Field id = newField("id", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
897 w.deleteDocuments(new Term("id", "0"));
899 IndexReader r = w.getReader();
// NOTE(review): w.expungeDeletes()/close presumably occur in the elided
// lines before reopening the directory reader — confirm.
903 r = IndexReader.open(dir, true);
904 assertEquals(1, r.numDocs());
905 assertFalse(r.hasDeletions());
// numDocs() of successive NRT readers must reflect deletes immediately:
// 2 docs, then 1 after deleting id:0, then 0 after deleting id:1.
// NOTE(review): the addDocument calls with ids "0"/"1" and the reader
// reassignments between assertions are elided in this chunk.
910 public void testDeletesNumDocs() throws Throwable {
911 Directory dir = newDirectory();
912 final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
913 Document doc = new Document();
914 doc.add(newField("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
915 Field id = newField("id", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
921 IndexReader r = w.getReader();
922 assertEquals(2, r.numDocs());
925 w.deleteDocuments(new Term("id", "0"));
927 assertEquals(1, r.numDocs());
930 w.deleteDocuments(new Term("id", "1"));
932 assertEquals(0, r.numDocs());
939 public void testEmptyIndex() throws Exception {
940 // Ensures that getReader works on an empty index, which hasn't been committed yet.
941 Directory dir = newDirectory();
942 IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
943 IndexReader r = w.getReader();
944 assertEquals(0, r.numDocs());
// Uses an inline IndexReaderWarmer with reader pooling enabled; after
// adding 20 docs with maxBufferedDocs=2 and merge factor 10, a merge must
// have triggered the warmer (didWarm set in the elided lines after the
// assertion on hits).
950 public void testSegmentWarmer() throws Exception {
951 Directory dir = newDirectory();
952 final AtomicBoolean didWarm = new AtomicBoolean();
953 IndexWriter w = new IndexWriter(
955 newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).
956 setMaxBufferedDocs(2).
957 setReaderPooling(true).
958 setMergedSegmentWarmer(new IndexWriter.IndexReaderWarmer() {
960 public void warm(IndexReader r) throws IOException {
961 IndexSearcher s = newSearcher(r);
962 TopDocs hits = s.search(new TermQuery(new Term("foo", "bar")), 10);
// The warmer runs on the fully merged segment containing all 20 docs.
963 assertEquals(20, hits.totalHits);
968 setMergePolicy(newLogMergePolicy(10))
971 Document doc = new Document();
972 doc.add(newField("foo", "bar", Field.Store.YES, Field.Index.NOT_ANALYZED));
973 for(int i=0;i<20;i++) {
979 assertTrue(didWarm.get());
982 public void testNoTermsIndex() throws Exception {
983 Directory dir = newDirectory();
984 IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
985 TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT))
986 .setReaderTermsIndexDivisor(-1));
987 Document doc = new Document();
988 doc.add(new Field("f", "val", Store.NO, Index.ANALYZED));
990 IndexReader r = IndexReader.open(w, true);
992 r.termDocs(new Term("f", "val"));
993 fail("should have failed to seek since terms index was not loaded");
994 } catch (IllegalStateException e) {
995 // expected - we didn't load the term index