1 package org.apache.lucene.index;
4 * Licensed to the Apache Software Foundation (ASF) under one or more
5 * contributor license agreements. See the NOTICE file distributed with
6 * this work for additional information regarding copyright ownership.
7 * The ASF licenses this file to You under the Apache License, Version 2.0
8 * (the "License"); you may not use this file except in compliance with
9 * the License. You may obtain a copy of the License at
11 * http://www.apache.org/licenses/LICENSE-2.0
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
20 import org.apache.lucene.store.MockDirectoryWrapper;
21 import org.apache.lucene.analysis.MockAnalyzer;
22 import org.apache.lucene.document.Document;
23 import org.apache.lucene.document.Field;
24 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
26 import org.apache.lucene.util.LuceneTestCase;
27 import java.io.IOException;
29 public class TestConcurrentMergeScheduler extends LuceneTestCase {
// NOTE(review): this view of the file is elided — method bodies and closing
// braces are missing between the numbered lines below. Comments state only
// what the visible code shows; elided behavior is hedged.
// Test-only MockDirectoryWrapper.Failure hook: once armed, eval() randomly
// throws an IOException when called from inside doFlush (but not close).
31 private class FailOnlyOnFlush extends MockDirectoryWrapper.Failure {
// Arms the failure. Body elided in this view — presumably sets the doFail
// flag read by eval(); TODO confirm against full source.
36 public void setDoFail() {
// Disarms the failure. Body elided — presumably clears doFail; TODO confirm.
41 public void clearDoFail() {
// Invoked by MockDirectoryWrapper on directory operations; decides whether
// this particular call should fail.
46 public void eval(MockDirectoryWrapper dir) throws IOException {
// Only consider failing when armed AND running on the test's own thread,
// so background merge threads are not affected.
47 if (doFail && isTestThread()) {
48 boolean isDoFlush = false;
49 boolean isClose = false;
// Walk the current stack to detect whether we are inside doFlush and/or
// close (the assignments to the two flags are elided in this view).
50 StackTraceElement[] trace = new Exception().getStackTrace();
51 for (int i = 0; i < trace.length; i++) {
52 if ("doFlush".equals(trace[i].getMethodName())) {
55 if ("close".equals(trace[i].getMethodName())) {
// Fail roughly half the eligible flushes, but never while closing.
59 if (isDoFlush && !isClose && random.nextBoolean()) {
61 throw new IOException(Thread.currentThread().getName() + ": now failing during flush");
67 // Make sure running BG merges still work fine even when
68 // we are hitting exceptions during flushing.
69 public void testFlushExceptions() throws IOException {
// Directory wired with the FailOnlyOnFlush hook so flushes can be made to
// throw IOException on demand.
70 MockDirectoryWrapper directory = newDirectory();
71 FailOnlyOnFlush failure = new FailOnlyOnFlush();
72 directory.failOn(failure);
// maxBufferedDocs=2 forces very frequent flushes, maximizing the chance of
// hitting the injected failure.
74 IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
75 writer.setInfoStream(VERBOSE ? System.out : null);
76 Document doc = new Document();
77 Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
// NOTE(review): lines are elided in this view — e.g. the declaration of
// extraCount (asserted below) and the try { that the catch at 103 closes.
81 for(int i=0;i<10;i++) {
83 System.out.println("TEST: iter=" + i);
// Add 20 uniquely-id'd docs per outer iteration.
86 for(int j=0;j<20;j++) {
87 idField.setValue(Integer.toString(i*20+j));
88 writer.addDocument(doc);
91 // must cycle here because sometimes the merge flushes
92 // the doc we just added and so there's nothing to
93 // flush, and we don't hit the exception
95 writer.addDocument(doc);
// Explicit flush should trip the armed failure hook...
98 writer.flush(true, true);
// ...and if the exception never fires, the test itself fails.
100 fail("failed to hit IOException");
103 } catch (IOException ioe) {
105 ioe.printStackTrace(System.out);
// Disarm so the writer can proceed normally and the count check is stable.
107 failure.clearDoFail();
// extraCount presumably tracks docs added by the retry cycle above — its
// declaration and updates are elided here; TODO confirm against full source.
111 assertEquals(20*(i+1)+extraCount, writer.numDocs());
// Final check: 10 iterations * 20 docs, plus the extras from retries, must
// all have survived the injected flush failures.
115 IndexReader reader = IndexReader.open(directory, true);
116 assertEquals(200+extraCount, reader.numDocs());
121 // Test that deletes committed after a merge started and
122 // before it finishes, are correctly merged back:
123 public void testDeleteMerging() throws IOException {
124 MockDirectoryWrapper directory = newDirectory();
126 LogDocMergePolicy mp = new LogDocMergePolicy();
127 // Force degenerate merging so we can get a mix of
128 // merging of segments with and without deletes at the
// A high minMergeDocs keeps tiny segments merge-eligible together.
130 mp.setMinMergeDocs(1000);
131 IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(
132 TEST_VERSION_CURRENT, new MockAnalyzer(random))
133 .setMergePolicy(mp));
134 writer.setInfoStream(VERBOSE ? System.out : null);
136 Document doc = new Document();
137 Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
139 for(int i=0;i<10;i++) {
141 System.out.println("\nTEST: cycle");
// 100 docs per cycle, each with a unique id so deletes below can target them.
143 for(int j=0;j<100;j++) {
144 idField.setValue(Integer.toString(i*100+j));
145 writer.addDocument(doc);
// NOTE(review): delID's declaration and increment are elided in this view;
// the visible loop deletes by id up to 100*(1+i). TODO confirm stride.
149 while(delID < 100*(1+i)) {
151 System.out.println("TEST: del " + delID);
153 writer.deleteDocuments(new Term("id", ""+delID));
// Surviving doc count must be exact — any lost delete (e.g. one committed
// while a merge was in flight) would change this number.
161 IndexReader reader = IndexReader.open(directory, true);
162 // Verify that we did not lose any deletes...
163 assertEquals(450, reader.numDocs());
// Verifies that background merges leave no unreferenced files in the
// directory, across repeated add/check/reopen cycles.
168 public void testNoExtraFiles() throws IOException {
169 MockDirectoryWrapper directory = newDirectory();
170 IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(
171 TEST_VERSION_CURRENT, new MockAnalyzer(random))
172 .setMaxBufferedDocs(2));
173 writer.setInfoStream(VERBOSE ? System.out : null);
175 for(int iter=0;iter<7;iter++) {
177 System.out.println("TEST: iter=" + iter);
// 21 docs with maxBufferedDocs=2 produces many small segments, triggering
// background merges.
180 for(int j=0;j<21;j++) {
181 Document doc = new Document();
182 doc.add(newField("content", "a b c", Field.Store.NO, Field.Index.ANALYZED));
183 writer.addDocument(doc);
// NOTE(review): a writer.close() presumably precedes this check — the line
// is elided in this view; TODO confirm against full source.
187 TestIndexWriter.assertNoUnreferencedFiles(directory, "testNoExtraFiles");
// Reopen in APPEND mode so the next iteration keeps stressing the same index.
190 writer = new IndexWriter(directory, newIndexWriterConfig(
191 TEST_VERSION_CURRENT, new MockAnalyzer(random))
192 .setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(2));
193 writer.setInfoStream(VERBOSE ? System.out : null);
201 public void testNoWaitClose() throws IOException {
202 MockDirectoryWrapper directory = newDirectory();
203 Document doc = new Document();
204 Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
207 IndexWriter writer = new IndexWriter(
209 newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
210 setMaxBufferedDocs(2).
211 setMergePolicy(newLogMergePolicy(100))
214 for(int iter=0;iter<10;iter++) {
216 for(int j=0;j<201;j++) {
217 idField.setValue(Integer.toString(iter*201+j));
218 writer.addDocument(doc);
221 int delID = iter*201;
222 for(int j=0;j<20;j++) {
223 writer.deleteDocuments(new Term("id", Integer.toString(delID)));
227 // Force a bunch of merge threads to kick off so we
228 // stress out aborting them on close:
229 ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(3);
230 writer.addDocument(doc);
235 IndexReader reader = IndexReader.open(directory, true);
236 assertEquals((1+iter)*182, reader.numDocs());
240 writer = new IndexWriter(
242 newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
243 setOpenMode(OpenMode.APPEND).
244 setMergePolicy(newLogMergePolicy(100))