package org.apache.lucene.index;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.util.LuceneTestCase;

import java.io.IOException;
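
/**
 * Exercises background merging: merges must keep working while flushes hit
 * injected exceptions, while deletes land during an in-flight merge, and
 * while close(false) aborts running merges.
 */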
public class TestConcurrentMergeScheduler extends LuceneTestCase {

  private static class FailOnlyOnFlush extends MockDirectoryWrapper.Failure {
    boolean doFail;
    boolean hitExc;

    @Override
    public void setDoFail() {
      this.doFail = true;
      hitExc = false;
    }

    @Override
    public void clearDoFail() {
      this.doFail = false;
    }
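
    // Only fail when a flush (and not a close) by the test's main thread is
    // on the stack; the background merge threads must never see the
    // injected exception.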
    @Override
    public void eval(MockDirectoryWrapper dir) throws IOException {
      if (doFail && (Thread.currentThread().getName().equals("main")
          || Thread.currentThread().getName().equals("Main Thread"))) {
        boolean isDoFlush = false;
        boolean isClose = false;
        StackTraceElement[] trace = new Exception().getStackTrace();
        for (int i = 0; i < trace.length; i++) {
          if ("doFlush".equals(trace[i].getMethodName())) {
            isDoFlush = true;
          }
          if ("close".equals(trace[i].getMethodName())) {
            isClose = true;
          }
        }
        if (isDoFlush && !isClose && random.nextBoolean()) {
          hitExc = true;
          throw new IOException(Thread.currentThread().getName() + ": now failing during flush");
        }
      }
    }
  }

  // Make sure running BG merges still work fine even when
  // we are hitting exceptions during flushing.
  public void testFlushExceptions() throws IOException {
    MockDirectoryWrapper directory = newDirectory();
    FailOnlyOnFlush failure = new FailOnlyOnFlush();
    directory.failOn(failure);

    IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
    writer.setInfoStream(VERBOSE ? System.out : null);
    Document doc = new Document();
    Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
    doc.add(idField);
    int extraCount = 0;
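
    // Each iteration adds 20 docs, then keeps adding single docs with the
    // failure armed until a flush actually hits the injected IOException.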
    for(int i=0;i<10;i++) {
      if (VERBOSE) {
        System.out.println("TEST: iter=" + i);
      }

      for(int j=0;j<20;j++) {
        idField.setValue(Integer.toString(i*20+j));
        writer.addDocument(doc);
      }

      // must cycle here because sometimes the merge flushes
      // the doc we just added and so there's nothing to
      // flush, and we don't hit the exception
      while(true) {
        writer.addDocument(doc);
        failure.setDoFail();
        try {
          writer.flush(true, true);
          if (failure.hitExc) {
            fail("failed to hit IOException");
          }
          extraCount++;
        } catch (IOException ioe) {
          if (VERBOSE) {
            ioe.printStackTrace(System.out);
          }
          failure.clearDoFail();
          break;
        }
      }

      // 20 docs per iteration, plus one extra doc for each flush that
      // succeeded before the injected failure finally hit:
      assertEquals(20*(i+1)+extraCount, writer.numDocs());
    }

    writer.close();
    IndexReader reader = IndexReader.open(directory, true);
    assertEquals(200+extraCount, reader.numDocs());
    reader.close();
    directory.close();
  }

  // Test that deletes committed after a merge started and
  // before it finishes, are correctly merged back:
  public void testDeleteMerging() throws IOException {
    MockDirectoryWrapper directory = newDirectory();

    LogDocMergePolicy mp = new LogDocMergePolicy();
    // Force degenerate merging so we can get a mix of
    // merging of segments with and without deletes at the
    // start:
    mp.setMinMergeDocs(1000);
    IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer(random))
        .setMergePolicy(mp));
    writer.setInfoStream(VERBOSE ? System.out : null);

    Document doc = new Document();
    Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
    doc.add(idField);
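
    // Each cycle adds 100 docs, then deletes the ids congruent to the cycle
    // number (mod 10) among all docs added so far, and commits, so deletes
    // arrive while background merges are running.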
    for(int i=0;i<10;i++) {
      if (VERBOSE) {
        System.out.println("\nTEST: cycle");
      }
      for(int j=0;j<100;j++) {
        idField.setValue(Integer.toString(i*100+j));
        writer.addDocument(doc);
      }

      int delID = i;
      while(delID < 100*(1+i)) {
        if (VERBOSE) {
          System.out.println("TEST: del " + delID);
        }
        writer.deleteDocuments(new Term("id", ""+delID));
        delID += 10;
      }

      writer.commit();
    }
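
    // 10 cycles x 100 adds = 1000 docs; the delete loops remove 550
    // distinct ids (10 + 20 + ... + 100), leaving 450.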
    writer.close();
    IndexReader reader = IndexReader.open(directory, true);
    // Verify that we did not lose any deletes...
    assertEquals(450, reader.numDocs());
    reader.close();
    directory.close();
  }

  public void testNoExtraFiles() throws IOException {
    MockDirectoryWrapper directory = newDirectory();
    IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer(random))
        .setMaxBufferedDocs(2));
    writer.setInfoStream(VERBOSE ? System.out : null);
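
    // Repeatedly build a small index with many background merges, close the
    // writer, verify no unreferenced files remain, then reopen in APPEND mode.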
    for(int iter=0;iter<7;iter++) {
      if (VERBOSE) {
        System.out.println("TEST: iter=" + iter);
      }

      for(int j=0;j<21;j++) {
        Document doc = new Document();
        doc.add(newField("content", "a b c", Field.Store.NO, Field.Index.ANALYZED));
        writer.addDocument(doc);
      }

      writer.close();
      TestIndexWriter.assertNoUnreferencedFiles(directory, "testNoExtraFiles");

      // Reopen
      writer = new IndexWriter(directory, newIndexWriterConfig(
          TEST_VERSION_CURRENT, new MockAnalyzer(random))
          .setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(2));
      writer.setInfoStream(VERBOSE ? System.out : null);
    }

    writer.close();
    directory.close();
  }

  public void testNoWaitClose() throws IOException {
    MockDirectoryWrapper directory = newDirectory();
    Document doc = new Document();
    Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
    doc.add(idField);

    IndexWriter writer = new IndexWriter(
        directory,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
            setMaxBufferedDocs(2).
            setMergePolicy(newLogMergePolicy(100))
    );
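
    // Each iteration: add 201 docs, delete 20, then drop the merge factor
    // to 3 to kick off many merges and abort them all via close(false).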
    for(int iter=0;iter<10;iter++) {

      for(int j=0;j<201;j++) {
        idField.setValue(Integer.toString(iter*201+j));
        writer.addDocument(doc);
      }

      int delID = iter*201;
      for(int j=0;j<20;j++) {
        writer.deleteDocuments(new Term("id", Integer.toString(delID)));
        delID += 5;
      }

      // Force a bunch of merge threads to kick off so we
      // stress out aborting them on close:
      ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(3);
      writer.addDocument(doc);
      writer.commit();

      writer.close(false);

      IndexReader reader = IndexReader.open(directory, true);
      // 201 adds + 1 extra add - 20 deletes = 182 net docs per iteration:
      assertEquals((1+iter)*182, reader.numDocs());
      reader.close();

      // Reopen
      writer = new IndexWriter(
          directory,
          newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
              setOpenMode(OpenMode.APPEND).
              setMergePolicy(newLogMergePolicy(100))
      );
    }
    writer.close();

    directory.close();
  }
}