X-Git-Url: https://git.mdrn.pl/pylucene.git/blobdiff_plain/a2e61f0c04805cfcb8706176758d1283c7e3a55c..aaeed5504b982cf3545252ab528713250aa33eed:/lucene-java-3.5.0/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java?ds=sidebyside

diff --git a/lucene-java-3.5.0/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java b/lucene-java-3.5.0/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java
new file mode 100644
index 0000000..6071017
--- /dev/null
+++ b/lucene-java-3.5.0/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java
@@ -0,0 +1,555 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+
+import static org.apache.lucene.index.TestIndexWriter.assertNoUnreferencedFiles;
+
+/**
+ * Tests for IndexWriter when the disk runs out of space
+ */
+public class TestIndexWriterOnDiskFull extends LuceneTestCase {
+
+  /*
+   * Make sure IndexWriter cleans up on hitting a disk
+   * full exception in addDocument.
+   * TODO: how to do this on windows with FSDirectory?
+   */
+  public void testAddDocumentOnDiskFull() throws IOException {
+
+    for(int pass=0;pass<2;pass++) {
+      if (VERBOSE) {
+        System.out.println("TEST: pass=" + pass);
+      }
+      boolean doAbort = pass == 1;
+      long diskFree = _TestUtil.nextInt(random, 100, 300);
+      while(true) {
+        if (VERBOSE) {
+          System.out.println("TEST: cycle: diskFree=" + diskFree);
+        }
+        MockDirectoryWrapper dir = new MockDirectoryWrapper(random, new RAMDirectory());
+        dir.setMaxSizeInBytes(diskFree);
+        IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+        writer.setInfoStream(VERBOSE ? System.out : null);
+        MergeScheduler ms = writer.getConfig().getMergeScheduler();
+        if (ms instanceof ConcurrentMergeScheduler) {
+          // This test intentionally produces exceptions
+          // in the threads that CMS launches; we don't
+          // want to pollute test output with these.
+          ((ConcurrentMergeScheduler) ms).setSuppressExceptions();
+        }
+
+        boolean hitError = false;
+        try {
+          for(int i=0;i<200;i++) {
+            addDoc(writer);
+          }
+          if (VERBOSE) {
+            System.out.println("TEST: done adding docs; now commit");
+          }
+          writer.commit();
+        } catch (IOException e) {
+          if (VERBOSE) {
+            System.out.println("TEST: exception on addDoc");
+            e.printStackTrace(System.out);
+          }
+          hitError = true;
+        }
+
+        if (hitError) {
+          if (doAbort) {
+            if (VERBOSE) {
+              System.out.println("TEST: now rollback");
+            }
+            writer.rollback();
+          } else {
+            try {
+              if (VERBOSE) {
+                System.out.println("TEST: now close");
+              }
+              writer.close();
+            } catch (IOException e) {
+              if (VERBOSE) {
+                System.out.println("TEST: exception on close; retry w/ no disk space limit");
+                e.printStackTrace(System.out);
+              }
+              dir.setMaxSizeInBytes(0);
+              writer.close();
+            }
+          }
+
+          //_TestUtil.syncConcurrentMerges(ms);
+
+          if (_TestUtil.anyFilesExceptWriteLock(dir)) {
+            assertNoUnreferencedFiles(dir, "after disk full during addDocument");
+
+            // Make sure reader can open the index:
+            IndexReader.open(dir, true).close();
+          }
+
+          dir.close();
+          // Now try again w/ more space:
+
+          diskFree += TEST_NIGHTLY ? _TestUtil.nextInt(random, 400, 600) : _TestUtil.nextInt(random, 3000, 5000);
+        } else {
+          //_TestUtil.syncConcurrentMerges(writer);
+          dir.setMaxSizeInBytes(0);
+          writer.close();
+          dir.close();
+          break;
+        }
+      }
+    }
+  }
+
+  // TODO: make @Nightly variant that provokes more disk
+  // fulls
+
+  // TODO: have test fail if on any given top
+  // iter there was not a single IOE hit
+
+  /*
+  Test: make sure when we run out of disk space or hit
+  random IOExceptions in any of the addIndexes(*) calls
+  that 1) index is not corrupt (searcher can open/search
+  it) and 2) transactional semantics are followed:
+  either all or none of the incoming documents were in
+  fact added.
+  */
+  public void testAddIndexOnDiskFull() throws IOException
+  {
+    int START_COUNT = 57;
+    int NUM_DIR = 50;
+    int END_COUNT = START_COUNT + NUM_DIR*25;
+
+    // Build up a bunch of dirs that have indexes which we
+    // will then merge together by calling addIndexes(*):
+    Directory[] dirs = new Directory[NUM_DIR];
+    long inputDiskUsage = 0;
+    for(int i=0;i<NUM_DIR;i++) {
+      dirs[i] = new MockDirectoryWrapper(random, new RAMDirectory());
+      IndexWriter writer = new IndexWriter(dirs[i], newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+      for(int j=0;j<25;j++) {
+        addDocWithIndex(writer, 25*i+j);
+      }
+      writer.close();
+      String[] files = dirs[i].listAll();
+      for(int j=0;j<files.length;j++) {
+        inputDiskUsage += dirs[i].fileLength(files[j]);
+      }
+    }
+
+    // Now, build a starting index that has START_COUNT docs.  We
+    // will then try to addIndexes into a copy of this:
+    MockDirectoryWrapper startDir = new MockDirectoryWrapper(random, new RAMDirectory());
+    IndexWriter writer = new IndexWriter(startDir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    for(int j=0;j<START_COUNT;j++) {
+      addDocWithIndex(writer, j);
+    }
+    writer.close();
+
+    // Make sure starting index seems to be working properly:
+    Term searchTerm = new Term("content", "aaa");
+    IndexReader reader = IndexReader.open(startDir, true);
+    assertEquals("first docFreq", 57, reader.docFreq(searchTerm));
+
+    IndexSearcher searcher = newSearcher(reader);
+    ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
+    assertEquals("first number of hits", 57, hits.length);
+    searcher.close();
+    reader.close();
+
+    // Iterate with larger and larger amounts of free
+    // disk space.  With little free disk space,
+    // addIndexes will certainly run out of space and
+    // fail.  Verify that when this happens, the index
+    // is not corrupt and in fact has added no documents.
+    // Then we increase the free disk space; at some
+    // point there is enough and addIndexes should
+    // succeed, and the index should show that all
+    // documents were added.
+
+    long diskUsage = startDir.sizeInBytes();
+
+    long startDiskUsage = 0;
+    String[] files = startDir.listAll();
+    for(int i=0;i<files.length;i++) {
+      startDiskUsage += startDir.fileLength(files[i]);
+    }
+
+    for(int iter=0;iter<3;iter++) {
+
+      if (VERBOSE)
+        System.out.println("TEST: iter=" + iter);
+
+      // Start with 100 bytes more than we are currently using:
+      long diskFree = diskUsage + _TestUtil.nextInt(random, 50, 200);
+
+      int method = iter;
+
+      boolean success = false;
+      boolean done = false;
+
+      String methodName;
+      if (0 == method) {
+        methodName = "addIndexes(Directory[]) + optimize()";
+      } else if (1 == method) {
+        methodName = "addIndexes(IndexReader[])";
+      } else {
+        methodName = "addIndexes(Directory[])";
+      }
+
+      while(!done) {
+        if (VERBOSE) {
+          System.out.println("TEST: cycle");
+        }
+
+        // Make a new dir that will enforce disk usage:
+        MockDirectoryWrapper dir = new MockDirectoryWrapper(random, new RAMDirectory(startDir));
+        writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+        IOException err = null;
+        writer.setInfoStream(VERBOSE ? System.out : null);
+
+        MergeScheduler ms = writer.getConfig().getMergeScheduler();
+        for(int x=0;x<2;x++) {
+          if (ms instanceof ConcurrentMergeScheduler)
+            // This test intentionally produces exceptions
+            // in the threads that CMS launches; we don't
+            // want to pollute test output with these.
+            if (0 == x)
+              ((ConcurrentMergeScheduler) ms).setSuppressExceptions();
+            else
+              ((ConcurrentMergeScheduler) ms).clearSuppressExceptions();
+
+          // Two loops: first time, limit disk space and
+          // throw random IOExceptions; second time, no
+          // disk space limit:
+
+          double rate = 0.05;
+          double diskRatio = ((double) diskFree)/diskUsage;
+          long thisDiskFree;
+
+          String testName = null;
+
+          if (0 == x) {
+            thisDiskFree = diskFree;
+            if (diskRatio >= 2.0) {
+              rate /= 2;
+            }
+            if (diskRatio >= 4.0) {
+              rate /= 2;
+            }
+            if (diskRatio >= 6.0) {
+              rate = 0.0;
+            }
+            if (VERBOSE)
+              testName = "disk full test " + methodName + " with disk full at " + diskFree + " bytes";
+          } else {
+            thisDiskFree = 0;
+            rate = 0.0;
+            if (VERBOSE)
+              testName = "disk full test " + methodName + " with unlimited disk space";
+          }
+
+          if (VERBOSE)
+            System.out.println("\ncycle: " + testName);
+
+          dir.setTrackDiskUsage(true);
+          dir.setMaxSizeInBytes(thisDiskFree);
+          dir.setRandomIOExceptionRate(rate);
+
+          try {
+
+            if (0 == method) {
+              writer.addIndexes(dirs);
+              writer.optimize();
+            } else if (1 == method) {
+              IndexReader readers[] = new IndexReader[dirs.length];
+              for(int i=0;i<dirs.length;i++) {