X-Git-Url: https://git.mdrn.pl/pylucene.git/blobdiff_plain/a2e61f0c04805cfcb8706176758d1283c7e3a55c..aaeed5504b982cf3545252ab528713250aa33eed:/lucene-java-3.4.0/lucene/backwards/src/test/org/apache/lucene/index/TestStressIndexing2.java

diff --git a/lucene-java-3.4.0/lucene/backwards/src/test/org/apache/lucene/index/TestStressIndexing2.java b/lucene-java-3.4.0/lucene/backwards/src/test/org/apache/lucene/index/TestStressIndexing2.java
deleted file mode 100644
index 9f914d8..0000000
--- a/lucene-java-3.4.0/lucene/backwards/src/test/org/apache/lucene/index/TestStressIndexing2.java
+++ /dev/null
@@ -1,711 +0,0 @@
-package org.apache.lucene.index;
-
-/**
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-
-import junit.framework.Assert;
-
-import org.apache.lucene.analysis.WhitespaceAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Fieldable;
-import org.apache.lucene.index.IndexWriterConfig.OpenMode;
-import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.StringHelper;
-import org.apache.lucene.util._TestUtil;
-
-public class TestStressIndexing2 extends LuceneTestCase {
-  static int maxFields=4;
-  static int bigFieldSize=10;
-  static boolean sameFieldOrder=false;
-  static int mergeFactor=3;
-  static int maxBufferedDocs=3;
-  static int seed=0;
-
-  public class MockIndexWriter extends IndexWriter {
-
-    public MockIndexWriter(Directory dir, IndexWriterConfig conf) throws IOException {
-      super(dir, conf);
-    }
-
-    @Override
-    boolean testPoint(String name) {
-      // if (name.equals("startCommit")) {
-      if (random.nextInt(4) == 2)
-        Thread.yield();
-      return true;
-    }
-  }
-
-  public void testRandomIWReader() throws Throwable {
-    Directory dir = newDirectory();
-
-    // TODO: verify equals using IW.getReader
-    DocsAndWriter dw = indexRandomIWReader(5, 3, 100, dir);
-    IndexReader reader = dw.writer.getReader();
-    dw.writer.commit();
-    verifyEquals(random, reader, dir, "id");
-    reader.close();
-    dw.writer.close();
-    dir.close();
-  }
-
-  public void testRandom() throws Throwable {
-    Directory dir1 = newDirectory();
-    Directory dir2 = newDirectory();
-    // mergeFactor=2; maxBufferedDocs=2; Map docs = indexRandom(1, 3, 2, dir1);
-    int maxThreadStates = 1+random.nextInt(10);
-    boolean doReaderPooling = random.nextBoolean();
-    Map<String,Document> docs = indexRandom(5, 3, 100, dir1, maxThreadStates, doReaderPooling);
-    indexSerial(random, docs, dir2);
-
-    // verifying verify
-    // verifyEquals(dir1, dir1, "id");
-    // verifyEquals(dir2, dir2, "id");
-
-    verifyEquals(dir1, dir2, "id");
-    dir1.close();
-    dir2.close();
-  }
-
-  public void testMultiConfig() throws Throwable {
-    // test lots of smaller different params together
-    int num = atLeast(3);
-    for (int i = 0; i < num; i++) { // increase iterations for better testing
-      if (VERBOSE) {
-        System.out.println("\n\nTEST: top iter=" + i);
-      }
-      sameFieldOrder=random.nextBoolean();
-      mergeFactor=random.nextInt(3)+2;
-      maxBufferedDocs=random.nextInt(3)+2;
-      int maxThreadStates = 1+random.nextInt(10);
-      boolean doReaderPooling = random.nextBoolean();
-      seed++;
-
-      int nThreads=random.nextInt(5)+1;
-      int iter=random.nextInt(5)+1;
-      int range=random.nextInt(20)+1;
-      Directory dir1 = newDirectory();
-      Directory dir2 = newDirectory();
-      if (VERBOSE) {
-        System.out.println(" nThreads=" + nThreads + " iter=" + iter + " range=" + range + " doPooling=" + doReaderPooling + " maxThreadStates=" + maxThreadStates + " sameFieldOrder=" + sameFieldOrder + " mergeFactor=" + mergeFactor);
-      }
-      Map<String,Document> docs = indexRandom(nThreads, iter, range, dir1, maxThreadStates, doReaderPooling);
-      if (VERBOSE) {
-        System.out.println("TEST: index serial");
-      }
-      indexSerial(random, docs, dir2);
-      if (VERBOSE) {
-        System.out.println("TEST: verify");
-      }
-      verifyEquals(dir1, dir2, "id");
-      dir1.close();
-      dir2.close();
-    }
-  }
-
-
-  static Term idTerm = new Term("id","");
-  IndexingThread[] threads;
-  static Comparator<Fieldable> fieldNameComparator = new Comparator<Fieldable>() {
-    public int compare(Fieldable o1, Fieldable o2) {
-      return o1.name().compareTo(o2.name());
-    }
-  };
-
-  // This test avoids using any extra synchronization in the multiple
-  // indexing threads to test that IndexWriter does correctly synchronize
-  // everything.
-
-  public static class DocsAndWriter {
-    Map<String,Document> docs;
-    IndexWriter writer;
-  }
-
-  public DocsAndWriter indexRandomIWReader(int nThreads, int iterations, int range, Directory dir) throws IOException, InterruptedException {
-    Map<String,Document> docs = new HashMap<String,Document>();
-    IndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig(
-        TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setOpenMode(OpenMode.CREATE).setRAMBufferSizeMB(
-        0.1).setMaxBufferedDocs(maxBufferedDocs).setMergePolicy(newLogMergePolicy()));
-    w.setInfoStream(VERBOSE ? System.out : null);
-    w.commit();
-    setUseCompoundFile(w.getConfig().getMergePolicy(), false);
-    setMergeFactor(w.getConfig().getMergePolicy(), mergeFactor);
-    /***
-    w.setMaxMergeDocs(Integer.MAX_VALUE);
-    w.setMaxFieldLength(10000);
-    w.setRAMBufferSizeMB(1);
-    w.setMergeFactor(10);
-    ***/
-
-    threads = new IndexingThread[nThreads];
-    for (int i=0; i<threads.length; i++) {
[... missing in source: the rest of indexRandomIWReader was lost when the page was flattened to text; it fills the threads array, starts and joins the IndexingThreads, gathers their docs maps into docs, and returns a DocsAndWriter holding the map and the writer ...]
-  public Map<String,Document> indexRandom(int nThreads, int iterations, int range, Directory dir, int maxThreadStates,
-                                          boolean doReaderPooling) throws IOException, InterruptedException {
-    Map<String,Document> docs = new HashMap<String,Document>();
-    for(int iter=0;iter<3;iter++) {
-      if (VERBOSE) {
-        System.out.println("TEST: iter=" + iter);
-      }
-      IndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig(
-          TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setOpenMode(OpenMode.CREATE)
-          .setRAMBufferSizeMB(0.1).setMaxBufferedDocs(maxBufferedDocs).setMaxThreadStates(maxThreadStates)
-          .setReaderPooling(doReaderPooling).setMergePolicy(newLogMergePolicy()));
-      w.setInfoStream(VERBOSE ? System.out : null);
-      setUseCompoundFile(w.getConfig().getMergePolicy(), false);
-      setMergeFactor(w.getConfig().getMergePolicy(), mergeFactor);
-
-      threads = new IndexingThread[nThreads];
-      for (int i=0; i<threads.length; i++) {
[... missing in source: the rest of indexRandom (running and joining the threads, closing the writer, collecting each thread's docs across the three iterations, returning the map) and the head of indexSerial were lost; the indexSerial signature below is reconstructed from the surviving call indexSerial(random, docs, dir2) ...]
-  public static void indexSerial(Random random, Map<String,Document> docs, Directory dir) throws IOException {
-    IndexWriter w = new IndexWriter(dir, LuceneTestCase.newIndexWriterConfig(random, TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setMergePolicy(newLogMergePolicy()));
-
-    // index all docs in a single thread
-    Iterator<Document> iter = docs.values().iterator();
-    while (iter.hasNext()) {
-      Document d = iter.next();
-      ArrayList<Fieldable> fields = new ArrayList<Fieldable>();
-      fields.addAll(d.getFields());
-      // put fields in same order each time
-      Collections.sort(fields, fieldNameComparator);
-
-      Document d1 = new Document();
-      d1.setBoost(d.getBoost());
-      for (int i=0; i<fields.size(); i++) {
[... missing in source: the end of indexSerial (adding each re-ordered document and closing the writer), the directory- and reader-level verifyEquals overloads, and the setup of the term-by-term comparison (the TermEnum/TermDocs pairs, the r2r1 id mapping, the info1/info2 arrays, hasDeletes, and the start of the len1 loop that the surviving len2 loop below mirrors) were lost ...]
-        if (len1>0) break;
-        if (!termEnum1.next()) break;
-      }
-
-      // iterate until we get some docs
-      int len2;
-      for(;;) {
-        len2=0;
-        term2 = termEnum2.term();
-        if (term2==null) break;
-        termDocs2.seek(termEnum2);
-        while (termDocs2.next()) {
-          int d2 = termDocs2.doc();
-          int f2 = termDocs2.freq();
-          info2[len2] = (((long)r2r1[d2])<<32) | f2;
-          len2++;
-        }
-        if (len2>0) break;
-        if (!termEnum2.next()) break;
-      }
-
-      if (!hasDeletes)
-        assertEquals(termEnum1.docFreq(), termEnum2.docFreq());
-
-      assertEquals(len1, len2);
-      if (len1==0) break; // no more terms
-
-      assertEquals(term1, term2);
-
-      // sort info2 to get it into ascending docid
-      Arrays.sort(info2, 0, len2);
-
-      // now compare
-      for (int i=0; i<len1; i++) {
[... missing in source: the element-by-element comparison of info1 and info2, the end of the index-level verifyEquals, and the head of the per-document verifyEquals(d1, d2) overload were lost ...]
-    List<Fieldable> ff1 = d1.getFields();
-    List<Fieldable> ff2 = d2.getFields();
-
-    Collections.sort(ff1, fieldNameComparator);
-    Collections.sort(ff2, fieldNameComparator);
-
-    assertEquals(ff1 + " : " + ff2, ff1.size(), ff2.size());
-
-    for (int i=0; i<ff1.size(); i++) {
[... missing in source: the per-field comparison body, the remaining verification helpers, and the opening of the IndexingThread worker class (including several of its fields) were lost ...]
-    Map<String,Document> docs = new HashMap<String,Document>();
-    Random r;
-
-    public int nextInt(int lim) {
-      return r.nextInt(lim);
-    }
-
-    // start is inclusive and end is exclusive
-    public int nextInt(int start, int end) {
-      return start + r.nextInt(end-start);
-    }
-
-    char[] buffer = new char[100];
-
-    private int addUTF8Token(int start) {
-      final int end = start + nextInt(20);
-      if (buffer.length < 1+end) {
-        char[] newBuffer = new char[(int) ((1+end)*1.25)];
-        System.arraycopy(buffer, 0, newBuffer, 0, buffer.length);
-        buffer = newBuffer;
-      }
-
-      for(int i=start;i<end;i++) {
[... missing in source: the body of the character loop, the rest of addUTF8Token, the other random-string helpers (including getIdString, which the surviving code calls), and the opening of the method that builds a document were lost ...]
-      ArrayList<Field> fields = new ArrayList<Field>();
-      String idString = getIdString();
-      Field idField = newField(idTerm.field(), idString, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS);
-      fields.add(idField);
-
-      int nFields = nextInt(maxFields);
-      for (int i=0; i<nFields; i++) {
[... end of the diff missing in source: the loop body that adds nFields random fields, the rest of the IndexingThread class, and the remainder of the deleted file (711 lines removed in total per the hunk header) were lost ...]