package org.apache.lucene.index;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;
import java.io.FileNotFoundException;
import java.util.ArrayList;
import java.util.List;

import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.Index;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field.TermVector;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.RAMDirectory;

import org.apache.lucene.search.PhraseQuery;

public class TestAddIndexes extends LuceneTestCase {

  public void testSimpleCase() throws IOException {
    // main directory
    Directory dir = newDirectory();
    // two auxiliary directories
    Directory aux = newDirectory();
    Directory aux2 = newDirectory();

    IndexWriter writer = null;

    writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT,
        new MockAnalyzer(random))
        .setOpenMode(OpenMode.CREATE));
    // add 100 documents
    addDocs(writer, 100);
    assertEquals(100, writer.maxDoc());
    writer.close();

    writer = newWriter(
        aux,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
            setOpenMode(OpenMode.CREATE).
            setMergePolicy(newLogMergePolicy(false))
    );
    // add 40 documents in separate files
    addDocs(writer, 40);
    assertEquals(40, writer.maxDoc());
    writer.close();

    writer = newWriter(aux2, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
    // add 50 documents in compound files
    addDocs2(writer, 50);
    assertEquals(50, writer.maxDoc());
    writer.close();

    // test doc count before segments are merged
    writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
    assertEquals(100, writer.maxDoc());
    writer.addIndexes(new Directory[] { aux, aux2 });
    assertEquals(190, writer.maxDoc());
    writer.close();

    // make sure the old index is correct
    verifyNumDocs(aux, 40);

    // make sure the new index is correct
    verifyNumDocs(dir, 190);

    // now add another set in.
    Directory aux3 = newDirectory();
    writer = newWriter(aux3, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
    // add 40 documents
    addDocs(writer, 40);
    assertEquals(40, writer.maxDoc());
    writer.close();

    // test doc count before segments are merged/index is optimized
    writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
    assertEquals(190, writer.maxDoc());
    writer.addIndexes(new Directory[] { aux3 });
    assertEquals(230, writer.maxDoc());
    writer.close();

    // make sure the new index is correct
    verifyNumDocs(dir, 230);

    verifyTermDocs(dir, new Term("content", "aaa"), 180);

    verifyTermDocs(dir, new Term("content", "bbb"), 50);

    // now optimize it.
    writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
    writer.optimize();
    writer.close();

    // make sure the new index is correct
    verifyNumDocs(dir, 230);

    verifyTermDocs(dir, new Term("content", "aaa"), 180);

    verifyTermDocs(dir, new Term("content", "bbb"), 50);

    // now add a single document
    Directory aux4 = newDirectory();
    writer = newWriter(aux4, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
    addDocs2(writer, 1);
    writer.close();

    writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
    assertEquals(230, writer.maxDoc());
    writer.addIndexes(new Directory[] { aux4 });
    assertEquals(231, writer.maxDoc());
    writer.close();

    verifyNumDocs(dir, 231);

    verifyTermDocs(dir, new Term("content", "bbb"), 51);

    dir.close();
    aux.close();
    aux2.close();
    aux3.close();
    aux4.close();
  }

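  // The three pending-deletes tests below share the same arithmetic: setUpDirs
  // leaves 1000 "aaa" docs in dir and 30 in aux (1030 once aux is added); the
  // update loop nets 10 live "bbb" docs, and the phrase delete of "bbb 14"
  // removes one of them, hence 1039 docs total: 1030 "aaa" and 9 "bbb".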
  public void testWithPendingDeletes() throws IOException {
    // main directory
    Directory dir = newDirectory();
    // auxiliary directory
    Directory aux = newDirectory();

    setUpDirs(dir, aux);
    IndexWriter writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
    writer.setInfoStream(VERBOSE ? System.out : null);
    writer.addIndexes(aux);

    // Adds 10 docs, then replaces them with another 10
    // docs, so 10 pending deletes:
    for (int i = 0; i < 20; i++) {
      Document doc = new Document();
      doc.add(newField("id", "" + (i % 10), Field.Store.NO, Field.Index.NOT_ANALYZED));
      doc.add(newField("content", "bbb " + i, Field.Store.NO,
                       Field.Index.ANALYZED));
      writer.updateDocument(new Term("id", "" + (i % 10)), doc);
    }
    // Deletes one of the 10 added docs, leaving 9:
    PhraseQuery q = new PhraseQuery();
    q.add(new Term("content", "bbb"));
    q.add(new Term("content", "14"));
    writer.deleteDocuments(q);

    writer.optimize();
    writer.commit();

    verifyNumDocs(dir, 1039);
    verifyTermDocs(dir, new Term("content", "aaa"), 1030);
    verifyTermDocs(dir, new Term("content", "bbb"), 9);

    writer.close();
    dir.close();
    aux.close();
  }

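  // Identical to testWithPendingDeletes except that addIndexes runs after the
  // buffered updates instead of before them.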
  public void testWithPendingDeletes2() throws IOException {
    // main directory
    Directory dir = newDirectory();
    // auxiliary directory
    Directory aux = newDirectory();

    setUpDirs(dir, aux);
    IndexWriter writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));

    // Adds 10 docs, then replaces them with another 10
    // docs, so 10 pending deletes:
    for (int i = 0; i < 20; i++) {
      Document doc = new Document();
      doc.add(newField("id", "" + (i % 10), Field.Store.NO, Field.Index.NOT_ANALYZED));
      doc.add(newField("content", "bbb " + i, Field.Store.NO, Field.Index.ANALYZED));
      writer.updateDocument(new Term("id", "" + (i % 10)), doc);
    }

    writer.addIndexes(new Directory[] {aux});

    // Deletes one of the 10 added docs, leaving 9:
    PhraseQuery q = new PhraseQuery();
    q.add(new Term("content", "bbb"));
    q.add(new Term("content", "14"));
    writer.deleteDocuments(q);

    writer.optimize();
    writer.commit();

    verifyNumDocs(dir, 1039);
    verifyTermDocs(dir, new Term("content", "aaa"), 1030);
    verifyTermDocs(dir, new Term("content", "bbb"), 9);

    writer.close();
    dir.close();
    aux.close();
  }

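  // Third variant of the same scenario: here addIndexes runs only after the
  // deletes have been buffered as well.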
  public void testWithPendingDeletes3() throws IOException {
    // main directory
    Directory dir = newDirectory();
    // auxiliary directory
    Directory aux = newDirectory();

    setUpDirs(dir, aux);
    IndexWriter writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));

    // Adds 10 docs, then replaces them with another 10
    // docs, so 10 pending deletes:
    for (int i = 0; i < 20; i++) {
      Document doc = new Document();
      doc.add(newField("id", "" + (i % 10), Field.Store.NO, Field.Index.NOT_ANALYZED));
      doc.add(newField("content", "bbb " + i, Field.Store.NO,
                       Field.Index.ANALYZED));
      writer.updateDocument(new Term("id", "" + (i % 10)), doc);
    }

    // Deletes one of the 10 added docs, leaving 9:
    PhraseQuery q = new PhraseQuery();
    q.add(new Term("content", "bbb"));
    q.add(new Term("content", "14"));
    writer.deleteDocuments(q);

    writer.addIndexes(new Directory[] {aux});

    writer.optimize();
    writer.commit();

    verifyNumDocs(dir, 1039);
    verifyTermDocs(dir, new Term("content", "aaa"), 1030);
    verifyTermDocs(dir, new Term("content", "bbb"), 9);

    writer.close();
    dir.close();
    aux.close();
  }

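  // addIndexes must reject a writer's own directory; after the expected
  // IllegalArgumentException, dir still holds exactly the original 100 docs.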
  // case 0: add self or exceed maxMergeDocs, expect exception
  public void testAddSelf() throws IOException {
    // main directory
    Directory dir = newDirectory();
    // auxiliary directory
    Directory aux = newDirectory();

    IndexWriter writer = null;

    writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
    // add 100 documents
    addDocs(writer, 100);
    assertEquals(100, writer.maxDoc());
    writer.close();

    writer = newWriter(
        aux,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
            setOpenMode(OpenMode.CREATE).
            setMaxBufferedDocs(1000).
            setMergePolicy(newLogMergePolicy(false))
    );
    // add 140 documents in separate files
    addDocs(writer, 40);
    writer.close();
    writer = newWriter(
        aux,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
            setOpenMode(OpenMode.CREATE).
            setMaxBufferedDocs(1000).
            setMergePolicy(newLogMergePolicy(false))
    );
    addDocs(writer, 100);
    writer.close();

    writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
    try {
      // cannot add self
      writer.addIndexes(new Directory[] { aux, dir });
      assertTrue(false);
    }
    catch (IllegalArgumentException e) {
      assertEquals(100, writer.maxDoc());
    }
    writer.close();

    // make sure the index is correct
    verifyNumDocs(dir, 100);
    dir.close();
    aux.close();
  }

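  // setUpDirs (defined further down) is the fixture for the "case" tests that
  // follow: dir gets a single 1000-doc segment and aux gets three 10-doc
  // segments, so the large head segment is never eligible for merging.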
  // in all the remaining tests, make the doc count of the oldest segment
  // in dir large so that it is never merged in addIndexes()
  // case 1: no tail segments
  public void testNoTailSegments() throws IOException {
    // main directory
    Directory dir = newDirectory();
    // auxiliary directory
    Directory aux = newDirectory();

    setUpDirs(dir, aux);

    IndexWriter writer = newWriter(
        dir,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
            setOpenMode(OpenMode.APPEND).
            setMaxBufferedDocs(10).
            setMergePolicy(newLogMergePolicy(4))
    );
    addDocs(writer, 10);

    writer.addIndexes(new Directory[] { aux });
    assertEquals(1040, writer.maxDoc());
    assertEquals(1000, writer.getDocCount(0));
    writer.close();

    // make sure the index is correct
    verifyNumDocs(dir, 1040);
    dir.close();
    aux.close();
  }

  // case 2: tail segments, invariants hold, no copy
  public void testNoCopySegments() throws IOException {
    // main directory
    Directory dir = newDirectory();
    // auxiliary directory
    Directory aux = newDirectory();

    setUpDirs(dir, aux);

    IndexWriter writer = newWriter(
        dir,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
            setOpenMode(OpenMode.APPEND).
            setMaxBufferedDocs(9).
            setMergePolicy(newLogMergePolicy(4))
    );
    addDocs(writer, 2);

    writer.addIndexes(new Directory[] { aux });
    assertEquals(1032, writer.maxDoc());
    assertEquals(1000, writer.getDocCount(0));
    writer.close();

    // make sure the index is correct
    verifyNumDocs(dir, 1032);
    dir.close();
    aux.close();
  }

  // case 3: tail segments, invariants hold, copy, invariants hold
  public void testNoMergeAfterCopy() throws IOException {
    // main directory
    Directory dir = newDirectory();
    // auxiliary directory
    Directory aux = newDirectory();

    setUpDirs(dir, aux);

    IndexWriter writer = newWriter(
        dir,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
            setOpenMode(OpenMode.APPEND).
            setMaxBufferedDocs(10).
            setMergePolicy(newLogMergePolicy(4))
    );

    writer.addIndexes(new Directory[] { aux, new MockDirectoryWrapper(random, new RAMDirectory(aux)) });
    assertEquals(1060, writer.maxDoc());
    assertEquals(1000, writer.getDocCount(0));
    writer.close();

    // make sure the index is correct
    verifyNumDocs(dir, 1060);
    dir.close();
    aux.close();
  }

  // case 4: tail segments, invariants hold, copy, invariants not hold
  public void testMergeAfterCopy() throws IOException {
    // main directory
    Directory dir = newDirectory();
    // auxiliary directory
    Directory aux = newDirectory();

    setUpDirs(dir, aux);

    IndexReader reader = IndexReader.open(aux, false);
    for (int i = 0; i < 20; i++) {
      reader.deleteDocument(i);
    }
    assertEquals(10, reader.numDocs());
    reader.close();

    IndexWriter writer = newWriter(
        dir,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
            setOpenMode(OpenMode.APPEND).
            setMaxBufferedDocs(4).
            setMergePolicy(newLogMergePolicy(4))
    );

    writer.addIndexes(new Directory[] { aux, new MockDirectoryWrapper(random, new RAMDirectory(aux)) });
    assertEquals(1020, writer.maxDoc());
    assertEquals(1000, writer.getDocCount(0));
    writer.close();
    dir.close();
    aux.close();
  }

  // case 5: tail segments, invariants not hold
  public void testMoreMerges() throws IOException {
    // main directory
    Directory dir = newDirectory();
    // auxiliary directory
    Directory aux = newDirectory();
    Directory aux2 = newDirectory();

    setUpDirs(dir, aux);

    IndexWriter writer = newWriter(
        aux2,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
            setOpenMode(OpenMode.CREATE).
            setMaxBufferedDocs(100).
            setMergePolicy(newLogMergePolicy(10))
    );
    writer.setInfoStream(VERBOSE ? System.out : null);
    writer.addIndexes(aux);
    assertEquals(30, writer.maxDoc());
    writer.close();

    IndexReader reader = IndexReader.open(aux, false);
    for (int i = 0; i < 27; i++) {
      reader.deleteDocument(i);
    }
    assertEquals(3, reader.numDocs());
    reader.close();

    reader = IndexReader.open(aux2, false);
    for (int i = 0; i < 8; i++) {
      reader.deleteDocument(i);
    }
    assertEquals(22, reader.numDocs());
    reader.close();

    writer = newWriter(
        dir,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
            setOpenMode(OpenMode.APPEND).
            setMaxBufferedDocs(6).
            setMergePolicy(newLogMergePolicy(4))
    );

    writer.addIndexes(new Directory[] { aux, aux2 });
    assertEquals(1040, writer.maxDoc());
    assertEquals(1000, writer.getDocCount(0));
    writer.close();
    dir.close();
    aux.close();
    aux2.close();
  }

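  // All tests above route writer creation through this helper so that segment
  // doc counts (LogDocMergePolicy), not byte sizes, drive merge decisions,
  // which keeps the expected segment structures predictable.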
  private IndexWriter newWriter(Directory dir, IndexWriterConfig conf)
      throws IOException {
    conf.setMergePolicy(new LogDocMergePolicy());
    final IndexWriter writer = new IndexWriter(dir, conf);
    return writer;
  }

  private void addDocs(IndexWriter writer, int numDocs) throws IOException {
    for (int i = 0; i < numDocs; i++) {
      Document doc = new Document();
      doc.add(newField("content", "aaa", Field.Store.NO,
                       Field.Index.ANALYZED));
      writer.addDocument(doc);
    }
  }

  private void addDocs2(IndexWriter writer, int numDocs) throws IOException {
    for (int i = 0; i < numDocs; i++) {
      Document doc = new Document();
      doc.add(newField("content", "bbb", Field.Store.NO,
                       Field.Index.ANALYZED));
      writer.addDocument(doc);
    }
  }

  private void verifyNumDocs(Directory dir, int numDocs) throws IOException {
    IndexReader reader = IndexReader.open(dir, true);
    assertEquals(numDocs, reader.maxDoc());
    assertEquals(numDocs, reader.numDocs());
    reader.close();
  }

  private void verifyTermDocs(Directory dir, Term term, int numDocs)
      throws IOException {
    IndexReader reader = IndexReader.open(dir, true);
    TermDocs termDocs = reader.termDocs(term);
    int count = 0;
    while (termDocs.next())
      count++;
    assertEquals(numDocs, count);
    reader.close();
  }

  private void setUpDirs(Directory dir, Directory aux) throws IOException {
    IndexWriter writer = null;

    writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(1000));
    // add 1000 documents in 1 segment
    addDocs(writer, 1000);
    assertEquals(1000, writer.maxDoc());
    assertEquals(1, writer.getSegmentCount());
    writer.close();

    writer = newWriter(
        aux,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
            setOpenMode(OpenMode.CREATE).
            setMaxBufferedDocs(1000).
            setMergePolicy(newLogMergePolicy(false, 10))
    );
    // add 30 documents in 3 segments
    for (int i = 0; i < 3; i++) {
      addDocs(writer, 10);
      writer.close();
      writer = newWriter(
          aux,
          newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
              setOpenMode(OpenMode.APPEND).
              setMaxBufferedDocs(1000).
              setMergePolicy(newLogMergePolicy(false, 10))
      );
    }
    assertEquals(30, writer.maxDoc());
    assertEquals(3, writer.getSegmentCount());
    writer.close();
  }

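  // Sanity check for addIndexes with a SerialMergeScheduler and an eagerly
  // merging LogByteSizeMergePolicy; per the test name, this guards against a
  // hang in close() after the segments are copied in.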
  public void testHangOnClose() throws IOException {

    Directory dir = newDirectory();
    LogByteSizeMergePolicy lmp = new LogByteSizeMergePolicy();
    lmp.setUseCompoundFile(false);
    lmp.setMergeFactor(100);
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer(random))
        .setMaxBufferedDocs(5).setMergePolicy(lmp));

    Document doc = new Document();
    doc.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
                     Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
    for(int i=0;i<60;i++)
      writer.addDocument(doc);

    Document doc2 = new Document();
    doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
                      Field.Index.NO));
    doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
                      Field.Index.NO));
    doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
                      Field.Index.NO));
    doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
                      Field.Index.NO));
    for(int i=0;i<10;i++)
      writer.addDocument(doc2);
    writer.close();

    Directory dir2 = newDirectory();
    lmp = new LogByteSizeMergePolicy();
    lmp.setMinMergeMB(0.0001);
    lmp.setUseCompoundFile(false);
    lmp.setMergeFactor(4);
    writer = new IndexWriter(dir2, newIndexWriterConfig(TEST_VERSION_CURRENT,
        new MockAnalyzer(random))
        .setMergeScheduler(new SerialMergeScheduler()).setMergePolicy(lmp));
    writer.addIndexes(new Directory[] {dir});
    writer.close();
    dir.close();
    dir2.close();
  }

  // TODO: these are also in TestIndexWriter... add a simple doc-writing method
  // like this to LuceneTestCase?
  private void addDoc(IndexWriter writer) throws IOException
  {
    Document doc = new Document();
    doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
    writer.addDocument(doc);
  }

  private abstract class RunAddIndexesThreads {

    Directory dir, dir2;
    final static int NUM_INIT_DOCS = 17;
    IndexWriter writer2;
    final List<Throwable> failures = new ArrayList<Throwable>();
    volatile boolean didClose;
    final IndexReader[] readers;
    final int NUM_COPY;
    final static int NUM_THREADS = 5;
    final Thread[] threads = new Thread[NUM_THREADS];

    public RunAddIndexesThreads(int numCopy) throws Throwable {
      NUM_COPY = numCopy;
      dir = new MockDirectoryWrapper(random, new RAMDirectory());
      IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
          TEST_VERSION_CURRENT, new MockAnalyzer(random))
          .setMaxBufferedDocs(2));
      for (int i = 0; i < NUM_INIT_DOCS; i++)
        addDoc(writer);
      writer.close();

      dir2 = newDirectory();
      writer2 = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
      writer2.setInfoStream(VERBOSE ? System.out : null);
      writer2.commit();

      readers = new IndexReader[NUM_COPY];
      for(int i=0;i<NUM_COPY;i++)
        readers[i] = IndexReader.open(dir, true);
    }

    void launchThreads(final int numIter) {
      for(int i=0;i<NUM_THREADS;i++) {
        threads[i] = new Thread() {
            @Override
            public void run() {
              try {
                final Directory[] dirs = new Directory[NUM_COPY];
                for(int k=0;k<NUM_COPY;k++)
                  dirs[k] = new MockDirectoryWrapper(random, new RAMDirectory(dir));

                int j = 0;
                while(true) {
                  // System.out.println(Thread.currentThread().getName() + ": iter j=" + j);
                  if (numIter > 0 && j == numIter)
                    break;
                  doBody(j++, dirs);
                }
              } catch (Throwable t) {
                handle(t);
              }
            }
          };
      }

      for(int i=0;i<NUM_THREADS;i++)
        threads[i].start();
    }

    void joinThreads() throws Exception {
      for(int i=0;i<NUM_THREADS;i++)
        threads[i].join();
    }

    void close(boolean doWait) throws Throwable {
      didClose = true;
      writer2.close(doWait);
    }

    void closeDir() throws Throwable {
      for(int i=0;i<NUM_COPY;i++)
        readers[i].close();
      dir.close();
      dir2.close();
    }

    abstract void doBody(int j, Directory[] dirs) throws Throwable;
    abstract void handle(Throwable t);
  }

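  // Harness summary: RunAddIndexesThreads seeds dir with NUM_INIT_DOCS (17)
  // docs, opens NUM_COPY readers over it, and spins up NUM_THREADS (5)
  // threads that repeatedly call doBody() with NUM_COPY private copies of
  // dir, while writer2 on dir2 is the shared target under test.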
  private class CommitAndAddIndexes extends RunAddIndexesThreads {
    public CommitAndAddIndexes(int numCopy) throws Throwable {
      super(numCopy);
    }

    @Override
    void handle(Throwable t) {
      t.printStackTrace(System.out);
      synchronized(failures) {
        failures.add(t);
      }
    }

    @Override
    void doBody(int j, Directory[] dirs) throws Throwable {
      switch(j%5) {
      case 0:
        if (VERBOSE) {
          System.out.println(Thread.currentThread().getName() + ": TEST: addIndexes(Dir[]) then optimize");
        }
        writer2.addIndexes(dirs);
        writer2.optimize();
        break;
      case 1:
        if (VERBOSE) {
          System.out.println(Thread.currentThread().getName() + ": TEST: addIndexes(Dir[])");
        }
        writer2.addIndexes(dirs);
        break;
      case 2:
        if (VERBOSE) {
          System.out.println(Thread.currentThread().getName() + ": TEST: addIndexes(IndexReader[])");
        }
        writer2.addIndexes(readers);
        break;
      case 3:
        if (VERBOSE) {
          System.out.println(Thread.currentThread().getName() + ": TEST: addIndexes(Dir[]) then maybeMerge");
        }
        writer2.addIndexes(dirs);
        writer2.maybeMerge();
        break;
      case 4:
        if (VERBOSE) {
          System.out.println(Thread.currentThread().getName() + ": TEST: commit");
        }
        writer2.commit();
      }
    }
  }

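  // In the doc-count check below, each thread runs NUM_ITER doBody iterations
  // and four of every five iterations (cases 0-3) add NUM_COPY copies of the
  // 17-doc source index, while the main thread adds 100 docs of its own:
  // hence 100 + NUM_COPY * (4*NUM_ITER/5) * NUM_THREADS * NUM_INIT_DOCS.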
  // LUCENE-1335: test simultaneous addIndexes & commits
  // from multiple threads
  public void testAddIndexesWithThreads() throws Throwable {

    final int NUM_ITER = TEST_NIGHTLY ? 15 : 5;
    final int NUM_COPY = 3;
    CommitAndAddIndexes c = new CommitAndAddIndexes(NUM_COPY);
    c.writer2.setInfoStream(VERBOSE ? System.out : null);
    c.launchThreads(NUM_ITER);

    for(int i=0;i<100;i++)
      addDoc(c.writer2);

    c.joinThreads();

    int expectedNumDocs = 100+NUM_COPY*(4*NUM_ITER/5)*RunAddIndexesThreads.NUM_THREADS*RunAddIndexesThreads.NUM_INIT_DOCS;
    assertEquals(expectedNumDocs, c.writer2.numDocs());

    c.close(true);

    assertTrue(c.failures.size() == 0);

    IndexReader reader = IndexReader.open(c.dir2, true);
    assertEquals(expectedNumDocs, reader.numDocs());
    reader.close();

    c.closeDir();
  }

  private class CommitAndAddIndexes2 extends CommitAndAddIndexes {
    public CommitAndAddIndexes2(int numCopy) throws Throwable {
      super(numCopy);
    }

    @Override
    void handle(Throwable t) {
      if (!(t instanceof AlreadyClosedException) && !(t instanceof NullPointerException)) {
        t.printStackTrace(System.out);
        synchronized(failures) {
          failures.add(t);
        }
      }
    }
  }

  // LUCENE-1335: test simultaneous addIndexes & close
  public void testAddIndexesWithClose() throws Throwable {
    final int NUM_COPY = 3;
    CommitAndAddIndexes2 c = new CommitAndAddIndexes2(NUM_COPY);
    //c.writer2.setInfoStream(System.out);
    c.launchThreads(-1);

    // Close w/o first stopping/joining the threads
    c.close(true);

    c.joinThreads();

    c.closeDir();

    assertTrue(c.failures.size() == 0);
  }

  private class CommitAndAddIndexes3 extends RunAddIndexesThreads {
    public CommitAndAddIndexes3(int numCopy) throws Throwable {
      super(numCopy);
    }

    @Override
    void doBody(int j, Directory[] dirs) throws Throwable {
      switch(j%5) {
      case 0:
        if (VERBOSE) {
          System.out.println("TEST: " + Thread.currentThread().getName() + ": addIndexes + optimize");
        }
        writer2.addIndexes(dirs);
        writer2.optimize();
        break;
      case 1:
        if (VERBOSE) {
          System.out.println("TEST: " + Thread.currentThread().getName() + ": addIndexes");
        }
        writer2.addIndexes(dirs);
        break;
      case 2:
        if (VERBOSE) {
          System.out.println("TEST: " + Thread.currentThread().getName() + ": addIndexes(IR[])");
        }
        writer2.addIndexes(readers);
        break;
      case 3:
        if (VERBOSE) {
          System.out.println("TEST: " + Thread.currentThread().getName() + ": optimize");
        }
        writer2.optimize();
        break;
      case 4:
        if (VERBOSE) {
          System.out.println("TEST: " + Thread.currentThread().getName() + ": commit");
        }
        writer2.commit();
      }
    }

    @Override
    void handle(Throwable t) {
      boolean report = true;

      if (t instanceof AlreadyClosedException || t instanceof MergePolicy.MergeAbortedException || t instanceof NullPointerException) {
        report = !didClose;
      } else if (t instanceof FileNotFoundException) {
        report = !didClose;
      } else if (t instanceof IOException) {
        Throwable t2 = t.getCause();
        if (t2 instanceof MergePolicy.MergeAbortedException) {
          report = !didClose;
        }
      }
      if (report) {
        t.printStackTrace(System.out);
        synchronized(failures) {
          failures.add(t);
        }
      }
    }
  }

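  // CommitAndAddIndexes3.handle() above only reports a failure if the writer
  // was not already closed: AlreadyClosedException, aborted merges,
  // FileNotFoundException and NPEs are all expected once close(false) or
  // rollback() races with in-flight addIndexes calls.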
  // LUCENE-1335: test simultaneous addIndexes & close
  public void testAddIndexesWithCloseNoWait() throws Throwable {

    final int NUM_COPY = 50;
    CommitAndAddIndexes3 c = new CommitAndAddIndexes3(NUM_COPY);
    if (VERBOSE) {
      c.writer2.setInfoStream(System.out);
    }
    c.launchThreads(-1);

    Thread.sleep(_TestUtil.nextInt(random, 10, 500));

    // Close w/o first stopping/joining the threads
    if (VERBOSE) {
      System.out.println("TEST: now close(false)");
    }
    c.close(false);

    c.joinThreads();

    if (VERBOSE) {
      System.out.println("TEST: done join threads");
    }
    c.closeDir();

    assertTrue(c.failures.size() == 0);
  }

  // LUCENE-1335: test simultaneous addIndexes & close
  public void testAddIndexesWithRollback() throws Throwable {

    final int NUM_COPY = TEST_NIGHTLY ? 50 : 5;
    CommitAndAddIndexes3 c = new CommitAndAddIndexes3(NUM_COPY);
    c.launchThreads(-1);

    Thread.sleep(_TestUtil.nextInt(random, 10, 500));

    // Close w/o first stopping/joining the threads
    if (VERBOSE) {
      System.out.println("TEST: now force rollback");
    }
    c.didClose = true;
    c.writer2.rollback();

    c.joinThreads();

    c.closeDir();

    assertTrue(c.failures.size() == 0);
  }

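  // After addIndexes(readers) under a forced-CFS merge policy, the target
  // directory should hold only the segments file, segments.gen, and the one
  // compound (.cfs) file; any leftover non-CFS files would make the count
  // below exceed 3.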
  // LUCENE-2790: tests that the non CFS files were deleted by addIndexes
  public void testNonCFSLeftovers() throws Exception {
    Directory[] dirs = new Directory[2];
    for (int i = 0; i < dirs.length; i++) {
      dirs[i] = new RAMDirectory();
      IndexWriter w = new IndexWriter(dirs[i], new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
      Document d = new Document();
      d.add(new Field("c", "v", Store.YES, Index.ANALYZED, TermVector.YES));
      w.addDocument(d);
      w.close();
    }

    IndexReader[] readers = new IndexReader[] { IndexReader.open(dirs[0]), IndexReader.open(dirs[1]) };

    Directory dir = new RAMDirectory();
    IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy());
    LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
    lmp.setNoCFSRatio(1.0); // Force creation of CFS
    lmp.setUseCompoundFile(true);
    IndexWriter w3 = new IndexWriter(dir, conf);
    w3.addIndexes(readers);
    w3.close();

    assertEquals("Only one compound segment should exist", 3, dir.listAll().length);
  }

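  // Deletes buffered in the target writer must only apply to documents that
  // were present when the delete was issued, not to documents that arrive
  // later via addIndexes(IndexReader...).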
  // LUCENE-2996: tests that addIndexes(IndexReader) applies existing deletes correctly.
  public void testExistingDeletes() throws Exception {
    Directory[] dirs = new Directory[2];
    for (int i = 0; i < dirs.length; i++) {
      dirs[i] = newDirectory();
      IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
      IndexWriter writer = new IndexWriter(dirs[i], conf);
      Document doc = new Document();
      doc.add(new Field("id", "myid", Store.NO, Index.NOT_ANALYZED_NO_NORMS));
      writer.addDocument(doc);
      writer.close();
    }

    IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
    IndexWriter writer = new IndexWriter(dirs[0], conf);

    // Now delete the document
    writer.deleteDocuments(new Term("id", "myid"));
    IndexReader r = IndexReader.open(dirs[1]);
    try {
      writer.addIndexes(r);
    } finally {
      r.close();
    }
    writer.commit();
    assertEquals("Documents from the incoming index should not have been deleted", 1, writer.numDocs());
    writer.close();

    for (Directory dir : dirs) {
      dir.close();
    }
  }

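  // File-name conventions relied on below: _0_1.del is delete generation 1 of
  // segment _0, and _0_1.s0 is the separate norms file (generation 1) for
  // field number 0 of that segment.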
  // LUCENE-3126: tests that if a non-CFS segment is copied, it is converted to
  // a CFS, given MP preferences
  public void testCopyIntoCFS() throws Exception {
    // create an index, no CFS (so we can assert that existing segments are not affected)
    Directory target = newDirectory();
    LogMergePolicy lmp = newLogMergePolicy(false);
    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, null).setMergePolicy(lmp);
    IndexWriter w = new IndexWriter(target, conf);
    w.addDocument(new Document());
    w.commit();
    assertFalse(w.segmentInfos.info(0).getUseCompoundFile());

    // prepare second index, no-CFS too + .del file + separate norms file
    Directory src = newDirectory();
    LogMergePolicy lmp2 = newLogMergePolicy(false);
    IndexWriterConfig conf2 = newIndexWriterConfig(TEST_VERSION_CURRENT,
        new MockAnalyzer(random)).setMergePolicy(lmp2);
    IndexWriter w2 = new IndexWriter(src, conf2);
    Document doc = new Document();
    doc.add(new Field("c", "some text", Store.YES, Index.ANALYZED));
    w2.addDocument(doc);
    doc = new Document();
    doc.add(new Field("d", "delete", Store.NO, Index.NOT_ANALYZED_NO_NORMS));
    w2.addDocument(doc);
    w2.commit();
    w2.deleteDocuments(new Term("d", "delete"));
    w2.commit();
    w2.close();

    // create separate norms file
    IndexReader r = IndexReader.open(src, false);
    r.setNorm(0, "c", (byte) 1);
    r.close();
    assertTrue(".del file not found", src.fileExists("_0_1.del"));
    assertTrue("separate norms file not found", src.fileExists("_0_1.s0"));

    // Case 1: force 'CFS' on target
    lmp.setUseCompoundFile(true);
    lmp.setNoCFSRatio(1.0);
    w.addIndexes(src);
    w.commit();
    assertFalse("existing segments should not be modified by addIndexes", w.segmentInfos.info(0).getUseCompoundFile());
    assertTrue("segment should have been converted to a CFS by addIndexes", w.segmentInfos.info(1).getUseCompoundFile());
    assertTrue(".del file not found", target.fileExists("_1_1.del"));
    assertTrue("separate norms file not found", target.fileExists("_1_1.s0"));

    // Case 2: LMP disallows CFS
    lmp.setUseCompoundFile(false);
    w.addIndexes(src);
    w.commit();
    assertFalse("segment should not have been converted to a CFS by addIndexes if MP disallows", w.segmentInfos.info(2).getUseCompoundFile());