1 package org.apache.lucene.index;
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
20 import java.io.IOException;
22 import org.apache.lucene.analysis.MockAnalyzer;
23 import org.apache.lucene.document.Document;
24 import org.apache.lucene.document.Field;
25 import org.apache.lucene.search.DefaultSimilarity;
26 import org.apache.lucene.search.IndexSearcher;
27 import org.apache.lucene.search.ScoreDoc;
28 import org.apache.lucene.search.Similarity;
29 import org.apache.lucene.search.TermQuery;
30 import org.apache.lucene.store.MockDirectoryWrapper;
31 import org.apache.lucene.store.RAMDirectory;
32 import org.apache.lucene.util.LuceneTestCase;
33 import org.apache.lucene.util._TestUtil;
35 public class TestIndexReaderOnDiskFull extends LuceneTestCase {
  /**
   * Make sure if reader tries to commit but hits disk
   * full that reader remains consistent and usable.
   */
40 public void testDiskFull() throws IOException {
42 Term searchTerm = new Term("content", "aaa");
43 int START_COUNT = 157;
46 // First build up a starting index:
47 MockDirectoryWrapper startDir = newDirectory();
48 IndexWriter writer = new IndexWriter(startDir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
50 System.out.println("TEST: create initial index");
51 writer.setInfoStream(System.out);
53 for(int i=0;i<157;i++) {
54 Document d = new Document();
55 d.add(newField("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
56 d.add(newField("content", "aaa " + i, Field.Store.NO, Field.Index.ANALYZED));
57 writer.addDocument(d);
64 IndexReader r = IndexReader.open(startDir);
65 IndexSearcher searcher = newSearcher(r);
66 ScoreDoc[] hits = null;
68 hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
69 } catch (IOException e) {
71 fail("exception when init searching: " + e);
77 long diskUsage = startDir.getRecomputedActualSizeInBytes();
78 long diskFree = diskUsage+_TestUtil.nextInt(random, 50, 200);
80 IOException err = null;
83 boolean gotExc = false;
85 // Iterate w/ ever increasing free disk space:
87 MockDirectoryWrapper dir = new MockDirectoryWrapper(random, new RAMDirectory(startDir));
89 // If IndexReader hits disk full, it can write to
90 // the same files again.
91 dir.setPreventDoubleWrite(false);
93 IndexReader reader = IndexReader.open(dir, false);
95 // For each disk size, first try to commit against
96 // dir that will hit random IOExceptions & disk
97 // full; after, give it infinite disk space & turn
98 // off random IOExceptions & retry w/ same reader:
99 boolean success = false;
101 for(int x=0;x<2;x++) {
104 double diskRatio = ((double) diskFree)/diskUsage;
109 thisDiskFree = diskFree;
110 if (diskRatio >= 2.0) {
113 if (diskRatio >= 4.0) {
116 if (diskRatio >= 6.0) {
120 System.out.println("\ncycle: " + diskFree + " bytes");
122 testName = "disk full during reader.close() @ " + thisDiskFree + " bytes";
127 System.out.println("\ncycle: same writer: unlimited disk space");
129 testName = "reader re-use after disk full";
132 dir.setMaxSizeInBytes(thisDiskFree);
133 dir.setRandomIOExceptionRate(rate);
134 Similarity sim = new DefaultSimilarity();
138 for(int i=0;i<13;i++) {
139 reader.deleteDocument(docId);
140 reader.setNorm(docId, "content", sim.encodeNormValue(2.0f));
149 } catch (IOException e) {
151 System.out.println(" hit IOException: " + e);
152 e.printStackTrace(System.out);
158 fail(testName + " hit IOException after disk space was freed up");
162 // Finally, verify index is not corrupt, and, if
163 // we succeeded, we see all docs changed, and if
164 // we failed, we see either all docs or no docs
165 // changed (transactional semantics):
166 IndexReader newReader = null;
168 newReader = IndexReader.open(dir, false);
169 } catch (IOException e) {
171 fail(testName + ":exception when creating IndexReader after disk full during close: " + e);
174 int result = newReader.docFreq(searchTerm);
176 if (result != END_COUNT) {
177 fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + END_COUNT);
180 // On hitting exception we still may have added
182 if (result != START_COUNT && result != END_COUNT) {
183 err.printStackTrace();
184 fail(testName + ": method did throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT + " or " + END_COUNT);
189 IndexSearcher searcher = newSearcher(newReader);
190 ScoreDoc[] hits = null;
192 hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
193 } catch (IOException e) {
195 fail(testName + ": exception when searching: " + e);
197 int result2 = hits.length;
199 if (result2 != END_COUNT) {
200 fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + END_COUNT);
203 // On hitting exception we still may have added
205 if (result2 != START_COUNT && result2 != END_COUNT) {
206 err.printStackTrace();
207 fail(testName + ": method did throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + START_COUNT);
214 if (result2 == END_COUNT) {
216 fail("never hit disk full");
223 // Try again with more bytes of free space:
224 diskFree += TEST_NIGHTLY ? _TestUtil.nextInt(random, 5, 20) : _TestUtil.nextInt(random, 50, 200);