package org.apache.lucene.index;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */


import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Collection;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.HashMap;
import java.util.Set;
import java.util.SortedSet;

import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.document.SetBasedFieldSelector;
import org.apache.lucene.index.IndexReader.FieldOption;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.NoSuchDirectoryException;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.LockReleaseFailedException;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil;

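/**
 * Basic tests of the {@link IndexReader} API: commits and user data, field
 * name enumeration, term vectors, binary fields, deletions, norms, write
 * locking, and reopen/readonly semantics.
 */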
public class TestIndexReader extends LuceneTestCase {

    public void testCommitUserData() throws Exception {
      Directory d = newDirectory();

      Map<String,String> commitUserData = new HashMap<String,String>();
      commitUserData.put("foo", "fighters");

      // set up writer
      IndexWriter writer = new IndexWriter(d, newIndexWriterConfig(
          TEST_VERSION_CURRENT, new MockAnalyzer(random))
      .setMaxBufferedDocs(2));
      for(int i=0;i<27;i++)
        addDocumentWithFields(writer);
      writer.close();

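      // open a non-readonly reader (readOnly=false) so we can delete a
      // document and flush with the commit user data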
      IndexReader r = IndexReader.open(d, false);
      r.deleteDocument(5);
      r.flush(commitUserData);
      r.close();

      SegmentInfos sis = new SegmentInfos();
      sis.read(d);
      IndexReader r2 = IndexReader.open(d, false);
      IndexCommit c = r.getIndexCommit();
      assertEquals(commitUserData, c.getUserData());

      assertEquals(sis.getCurrentSegmentFileName(), c.getSegmentsFileName());

      assertTrue(c.equals(r.getIndexCommit()));

      // Change the index
      writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
          new MockAnalyzer(random)).setOpenMode(
              OpenMode.APPEND).setMaxBufferedDocs(2));
      for(int i=0;i<7;i++)
        addDocumentWithFields(writer);
      writer.close();

      IndexReader r3 = r2.reopen();
      assertFalse(c.equals(r3.getIndexCommit()));
      assertFalse(r2.getIndexCommit().isOptimized());
      r3.close();

      writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
        new MockAnalyzer(random))
        .setOpenMode(OpenMode.APPEND));
      writer.optimize();
      writer.close();

      r3 = r2.reopen();
      assertTrue(r3.getIndexCommit().isOptimized());
      r2.close();
      r3.close();
      d.close();
    }

    public void testIsCurrent() throws Exception {
      Directory d = newDirectory();
      IndexWriter writer = new IndexWriter(d, newIndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer(random)));
      addDocumentWithFields(writer);
      writer.close();
      // set up reader:
      IndexReader reader = IndexReader.open(d, false);
      assertTrue(reader.isCurrent());
      // modify index by adding another document:
      writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
          new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
      addDocumentWithFields(writer);
      writer.close();
      assertFalse(reader.isCurrent());
      // re-create index:
      writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
          new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
      addDocumentWithFields(writer);
      writer.close();
      assertFalse(reader.isCurrent());
      reader.close();
      d.close();
    }

    /**
     * Tests the IndexReader.getFieldNames implementation
     * @throws Exception on error
     */
    public void testGetFieldNames() throws Exception {
        Directory d = newDirectory();
        // set up writer
        IndexWriter writer = new IndexWriter(
            d,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
        );

        Document doc = new Document();
        doc.add(new Field("keyword","test1", Field.Store.YES, Field.Index.NOT_ANALYZED));
        doc.add(new Field("text","test1", Field.Store.YES, Field.Index.ANALYZED));
        doc.add(new Field("unindexed","test1", Field.Store.YES, Field.Index.NO));
        doc.add(new Field("unstored","test1", Field.Store.NO, Field.Index.ANALYZED));
        writer.addDocument(doc);

        writer.close();
        // set up reader
        IndexReader reader = IndexReader.open(d, false);
        Collection<String> fieldNames = reader.getFieldNames(IndexReader.FieldOption.ALL);
        assertTrue(fieldNames.contains("keyword"));
        assertTrue(fieldNames.contains("text"));
        assertTrue(fieldNames.contains("unindexed"));
        assertTrue(fieldNames.contains("unstored"));
        reader.close();
        // add more documents
        writer = new IndexWriter(
            d,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
                setOpenMode(OpenMode.APPEND).
                setMergePolicy(newLogMergePolicy())
        );
        // want to get some more segments here
        int mergeFactor = ((LogMergePolicy) writer.getConfig().getMergePolicy()).getMergeFactor();
        for (int i = 0; i < 5*mergeFactor; i++) {
          doc = new Document();
          doc.add(new Field("keyword","test1", Field.Store.YES, Field.Index.NOT_ANALYZED));
          doc.add(new Field("text","test1", Field.Store.YES, Field.Index.ANALYZED));
          doc.add(new Field("unindexed","test1", Field.Store.YES, Field.Index.NO));
          doc.add(new Field("unstored","test1", Field.Store.NO, Field.Index.ANALYZED));
          writer.addDocument(doc);
        }
        // new fields are in some different segments (we hope)
        for (int i = 0; i < 5*mergeFactor; i++) {
          doc = new Document();
          doc.add(new Field("keyword2","test1", Field.Store.YES, Field.Index.NOT_ANALYZED));
          doc.add(new Field("text2","test1", Field.Store.YES, Field.Index.ANALYZED));
          doc.add(new Field("unindexed2","test1", Field.Store.YES, Field.Index.NO));
          doc.add(new Field("unstored2","test1", Field.Store.NO, Field.Index.ANALYZED));
          writer.addDocument(doc);
        }
        // new termvector fields
        for (int i = 0; i < 5*mergeFactor; i++) {
          doc = new Document();
          doc.add(new Field("tvnot","tvnot", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
          doc.add(new Field("termvector","termvector", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.YES));
          doc.add(new Field("tvoffset","tvoffset", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_OFFSETS));
          doc.add(new Field("tvposition","tvposition", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS));
          doc.add(newField("tvpositionoffset","tvpositionoffset", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
          writer.addDocument(doc);
        }

        writer.close();
        // verify fields again
        reader = IndexReader.open(d, false);
        fieldNames = reader.getFieldNames(IndexReader.FieldOption.ALL);
        assertEquals(13, fieldNames.size());    // the following fields
        assertTrue(fieldNames.contains("keyword"));
        assertTrue(fieldNames.contains("text"));
        assertTrue(fieldNames.contains("unindexed"));
        assertTrue(fieldNames.contains("unstored"));
        assertTrue(fieldNames.contains("keyword2"));
        assertTrue(fieldNames.contains("text2"));
        assertTrue(fieldNames.contains("unindexed2"));
        assertTrue(fieldNames.contains("unstored2"));
        assertTrue(fieldNames.contains("tvnot"));
        assertTrue(fieldNames.contains("termvector"));
        assertTrue(fieldNames.contains("tvposition"));
        assertTrue(fieldNames.contains("tvoffset"));
        assertTrue(fieldNames.contains("tvpositionoffset"));

        // verify that only indexed fields were returned
        fieldNames = reader.getFieldNames(IndexReader.FieldOption.INDEXED);
        assertEquals(11, fieldNames.size());    // 6 original + the 5 termvector fields
        assertTrue(fieldNames.contains("keyword"));
        assertTrue(fieldNames.contains("text"));
        assertTrue(fieldNames.contains("unstored"));
        assertTrue(fieldNames.contains("keyword2"));
        assertTrue(fieldNames.contains("text2"));
        assertTrue(fieldNames.contains("unstored2"));
        assertTrue(fieldNames.contains("tvnot"));
        assertTrue(fieldNames.contains("termvector"));
        assertTrue(fieldNames.contains("tvposition"));
        assertTrue(fieldNames.contains("tvoffset"));
        assertTrue(fieldNames.contains("tvpositionoffset"));

        // verify that only unindexed fields were returned
        fieldNames = reader.getFieldNames(IndexReader.FieldOption.UNINDEXED);
        assertEquals(2, fieldNames.size());    // the following fields
        assertTrue(fieldNames.contains("unindexed"));
        assertTrue(fieldNames.contains("unindexed2"));

        // verify index term vector fields
        fieldNames = reader.getFieldNames(IndexReader.FieldOption.TERMVECTOR);
        assertEquals(1, fieldNames.size());    // 1 field has term vector only
        assertTrue(fieldNames.contains("termvector"));

        fieldNames = reader.getFieldNames(IndexReader.FieldOption.TERMVECTOR_WITH_POSITION);
        assertEquals(1, fieldNames.size());    // 1 field has term vector with positions only
        assertTrue(fieldNames.contains("tvposition"));

        fieldNames = reader.getFieldNames(IndexReader.FieldOption.TERMVECTOR_WITH_OFFSET);
        assertEquals(1, fieldNames.size());    // 1 field has term vector with offsets only
        assertTrue(fieldNames.contains("tvoffset"));

        fieldNames = reader.getFieldNames(IndexReader.FieldOption.TERMVECTOR_WITH_POSITION_OFFSET);
        assertEquals(1, fieldNames.size());    // 1 field has term vector with positions and offsets
        assertTrue(fieldNames.contains("tvpositionoffset"));
        reader.close();
        d.close();
    }

  public void testTermVectors() throws Exception {
    Directory d = newDirectory();
    // set up writer
    IndexWriter writer = new IndexWriter(
        d,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
            setMergePolicy(newLogMergePolicy())
    );
    // want to get some more segments here
    // new termvector fields
    int mergeFactor = ((LogMergePolicy) writer.getConfig().getMergePolicy()).getMergeFactor();
    for (int i = 0; i < 5 * mergeFactor; i++) {
      Document doc = new Document();
      doc.add(new Field("tvnot","one two two three three three", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
      doc.add(new Field("termvector","one two two three three three", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.YES));
      doc.add(new Field("tvoffset","one two two three three three", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_OFFSETS));
      doc.add(new Field("tvposition","one two two three three three", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS));
      doc.add(new Field("tvpositionoffset","one two two three three three", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));

      writer.addDocument(doc);
    }
    writer.close();
    IndexReader reader = IndexReader.open(d, false);
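    // collect doc 0's term vectors, grouped by field and sorted by frequency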
    FieldSortedTermVectorMapper mapper = new FieldSortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
    reader.getTermFreqVector(0, mapper);
    Map<String,SortedSet<TermVectorEntry>> map = mapper.getFieldToTerms();
    assertTrue("map is null and it shouldn't be", map != null);
    assertTrue("map Size: " + map.size() + " is not: " + 4, map.size() == 4);
    Set<TermVectorEntry> set = map.get("termvector");
    for (Iterator<TermVectorEntry> iterator = set.iterator(); iterator.hasNext();) {
      TermVectorEntry entry = iterator.next();
      assertTrue("entry is null and it shouldn't be", entry != null);
      if (VERBOSE) System.out.println("Entry: " + entry);
    }
    reader.close();
    d.close();
  }

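  // walks the TermDocs enum for the given term and asserts that exactly
  // `expected` documents contain it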
  static void assertTermDocsCount(String msg,
                                     IndexReader reader,
                                     Term term,
                                     int expected)
    throws IOException
    {
        TermDocs tdocs = null;

        try {
            tdocs = reader.termDocs(term);
            assertNotNull(msg + ", null TermDocs", tdocs);
            int count = 0;
            while(tdocs.next()) {
                count++;
            }
            assertEquals(msg + ", count mismatch", expected, count);

        } finally {
            if (tdocs != null)
                tdocs.close();
        }

    }


    public void testBinaryFields() throws IOException {
        Directory dir = newDirectory();
        byte[] bin = new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9};

        IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));

        for (int i = 0; i < 10; i++) {
          addDoc(writer, "document number " + (i + 1));
          addDocumentWithFields(writer);
          addDocumentWithDifferentFields(writer);
          addDocumentWithTermVectorFields(writer);
        }
        writer.close();
        writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND).setMergePolicy(newLogMergePolicy()));
        Document doc = new Document();
        doc.add(new Field("bin1", bin));
        doc.add(new Field("junk", "junk text", Field.Store.NO, Field.Index.ANALYZED));
        writer.addDocument(doc);
        writer.close();
        IndexReader reader = IndexReader.open(dir, false);
        doc = reader.document(reader.maxDoc() - 1);
        Field[] fields = doc.getFields("bin1");
        assertNotNull(fields);
        assertEquals(1, fields.length);
        Field b1 = fields[0];
        assertTrue(b1.isBinary());
        byte[] data1 = b1.getBinaryValue();
        assertEquals(bin.length, b1.getBinaryLength());
        for (int i = 0; i < bin.length; i++) {
          assertEquals(bin[i], data1[i + b1.getBinaryOffset()]);
        }
        Set<String> lazyFields = new HashSet<String>();
        lazyFields.add("bin1");
        FieldSelector sel = new SetBasedFieldSelector(new HashSet<String>(), lazyFields);
        doc = reader.document(reader.maxDoc() - 1, sel);
        Fieldable[] fieldables = doc.getFieldables("bin1");
        assertNotNull(fieldables);
        assertEquals(1, fieldables.length);
        Fieldable fb1 = fieldables[0];
        assertTrue(fb1.isBinary());
        assertEquals(bin.length, fb1.getBinaryLength());
        data1 = fb1.getBinaryValue();
        assertEquals(bin.length, fb1.getBinaryLength());
        for (int i = 0; i < bin.length; i++) {
          assertEquals(bin[i], data1[i + fb1.getBinaryOffset()]);
        }
        reader.close();

        // force optimize
        writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND).setMergePolicy(newLogMergePolicy()));
        writer.optimize();
        writer.close();
        reader = IndexReader.open(dir, false);
        doc = reader.document(reader.maxDoc() - 1);
        fields = doc.getFields("bin1");
        assertNotNull(fields);
        assertEquals(1, fields.length);
        b1 = fields[0];
        assertTrue(b1.isBinary());
        data1 = b1.getBinaryValue();
        assertEquals(bin.length, b1.getBinaryLength());
        for (int i = 0; i < bin.length; i++) {
          assertEquals(bin[i], data1[i + b1.getBinaryOffset()]);
        }
        reader.close();
        dir.close();
    }

    // Make sure attempts to make changes after reader is
    // closed throw AlreadyClosedException:
    public void testChangesAfterClose() throws IOException {
        Directory dir = newDirectory();

        IndexWriter writer = null;
        IndexReader reader = null;
        Term searchTerm = new Term("content", "aaa");

        //  add 11 documents with term: aaa
        writer  = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
        for (int i = 0; i < 11; i++) {
            addDoc(writer, searchTerm.text());
        }
        writer.close();

        reader = IndexReader.open(dir, false);

        // Close reader:
        reader.close();

        // Then, try to make changes:
        try {
          reader.deleteDocument(4);
          fail("deleteDocument after close failed to throw AlreadyClosedException");
        } catch (AlreadyClosedException e) {
          // expected
        }

        try {
          reader.setNorm(5, "aaa", 2.0f);
          fail("setNorm after close failed to throw AlreadyClosedException");
        } catch (AlreadyClosedException e) {
          // expected
        }

        try {
          reader.undeleteAll();
          fail("undeleteAll after close failed to throw AlreadyClosedException");
        } catch (AlreadyClosedException e) {
          // expected
        }
        dir.close();
    }

    // Make sure we get lock obtain failed exception with 2 writers:
    public void testLockObtainFailed() throws IOException {
        Directory dir = newDirectory();

        Term searchTerm = new Term("content", "aaa");

        //  add 11 documents with term: aaa
        IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
        writer.commit();
        for (int i = 0; i < 11; i++) {
            addDoc(writer, searchTerm.text());
        }

        // Create reader:
        IndexReader reader = IndexReader.open(dir, false);

        // Try to make changes
        try {
          reader.deleteDocument(4);
          fail("deleteDocument should have hit LockObtainFailedException");
        } catch (LockObtainFailedException e) {
          // expected
        }

        try {
          reader.setNorm(5, "aaa", 2.0f);
          fail("setNorm should have hit LockObtainFailedException");
        } catch (LockObtainFailedException e) {
          // expected
        }

        try {
          reader.undeleteAll();
          fail("undeleteAll should have hit LockObtainFailedException");
        } catch (LockObtainFailedException e) {
          // expected
        }
        writer.close();
        reader.close();
        dir.close();
    }

    // Make sure you can set norms & commit even if a reader
    // is open against the index:
    public void testWritingNorms() throws IOException {
        Directory dir = newDirectory();
        IndexWriter writer;
        IndexReader reader;
        Term searchTerm = new Term("content", "aaa");

        //  add 1 document with term: aaa
        writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
        addDoc(writer, searchTerm.text());
        writer.close();

        //  now open reader & set norm for doc 0
        reader = IndexReader.open(dir, false);
        reader.setNorm(0, "content", (float) 2.0);

        // we should be holding the write lock now:
        assertTrue("locked", IndexWriter.isLocked(dir));

        reader.commit();

        // we should not be holding the write lock now:
        assertTrue("not locked", !IndexWriter.isLocked(dir));

        // open a 2nd reader:
        IndexReader reader2 = IndexReader.open(dir, false);

        // set norm again for doc 0
        reader.setNorm(0, "content", (float) 3.0);
        assertTrue("locked", IndexWriter.isLocked(dir));

        reader.close();

        // we should not be holding the write lock now:
        assertTrue("not locked", !IndexWriter.isLocked(dir));

        reader2.close();
        dir.close();
    }


    // Make sure you can set norms & commit, and there are
    // no extra norms files left:
    public void testWritingNormsNoReader() throws IOException {
        Directory dir = newDirectory();
        IndexWriter writer = null;
        IndexReader reader = null;
        Term searchTerm = new Term("content", "aaa");

        //  add 1 document with term: aaa
        writer  = new IndexWriter(
            dir,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
                setMergePolicy(newLogMergePolicy(false))
        );
        addDoc(writer, searchTerm.text());
        writer.close();

        //  now open reader & set norm for doc 0 (writes to
        //  _0_1.s0)
        reader = IndexReader.open(dir, false);
        reader.setNorm(0, "content", (float) 2.0);
        reader.close();

        //  now open reader again & set norm for doc 0 (writes to _0_2.s0)
        reader = IndexReader.open(dir, false);
        reader.setNorm(0, "content", (float) 2.0);
        reader.close();
        assertFalse("failed to remove first generation norms file on writing second generation",
                    dir.fileExists("_0_1.s0"));

        dir.close();
    }

    /* ??? public void testOpenEmptyDirectory() throws IOException{
      String dirName = "test.empty";
      File fileDirName = new File(dirName);
      if (!fileDirName.exists()) {
        fileDirName.mkdir();
      }
      try {
        IndexReader.open(fileDirName);
        fail("opening IndexReader on empty directory failed to produce FileNotFoundException");
      } catch (FileNotFoundException e) {
        // GOOD
      }
      rmDir(fileDirName);
    }*/

  public void testFilesOpenClose() throws IOException {
        // Create initial data set
        File dirFile = _TestUtil.getTempDir("TestIndexReader.testFilesOpenClose");
        Directory dir = newFSDirectory(dirFile);
        IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
        addDoc(writer, "test");
        writer.close();
        dir.close();

        // Try to erase the data - this ensures that the writer closed all files
        _TestUtil.rmDir(dirFile);
        dir = newFSDirectory(dirFile);

        // Now create the data set again, just as before
        writer  = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
        addDoc(writer, "test");
        writer.close();
        dir.close();

        // Now open existing directory and test that reader closes all files
        dir = newFSDirectory(dirFile);
        IndexReader reader1 = IndexReader.open(dir, false);
        reader1.close();
        dir.close();

        // The following will fail if reader did not close
        // all files
        _TestUtil.rmDir(dirFile);
    }

    public void testLastModified() throws Exception {
      for(int i=0;i<2;i++) {
        final Directory dir = newDirectory();
        assertFalse(IndexReader.indexExists(dir));
        IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
        addDocumentWithFields(writer);
        assertTrue(IndexWriter.isLocked(dir));          // writer open, so dir is locked
        writer.close();
        assertTrue(IndexReader.indexExists(dir));
        IndexReader reader = IndexReader.open(dir, false);
        assertFalse(IndexWriter.isLocked(dir));         // reader only, no lock
        long version = IndexReader.lastModified(dir);
        if (i == 1) {
          long version2 = IndexReader.lastModified(dir);
          assertEquals(version, version2);
        }
        reader.close();
        // modify index and check lastModified has advanced:
        Thread.sleep(1000);

        writer  = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
        addDocumentWithFields(writer);
        writer.close();
        reader = IndexReader.open(dir, false);
        assertTrue("old lastModified is " + version + "; new lastModified is " + IndexReader.lastModified(dir), version <= IndexReader.lastModified(dir));
        reader.close();
        dir.close();
      }
    }

    public void testVersion() throws IOException {
      Directory dir = newDirectory();
      assertFalse(IndexReader.indexExists(dir));
      IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
      addDocumentWithFields(writer);
      assertTrue(IndexWriter.isLocked(dir));            // writer open, so dir is locked
      writer.close();
      assertTrue(IndexReader.indexExists(dir));
      IndexReader reader = IndexReader.open(dir, false);
      assertFalse(IndexWriter.isLocked(dir));           // reader only, no lock
      long version = IndexReader.getCurrentVersion(dir);
      reader.close();
      // modify index and check version has been
      // incremented:
      writer  = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
      addDocumentWithFields(writer);
      writer.close();
      reader = IndexReader.open(dir, false);
      assertTrue("old version is " + version + "; new version is " + IndexReader.getCurrentVersion(dir), version < IndexReader.getCurrentVersion(dir));
      reader.close();
      dir.close();
    }

    public void testLock() throws IOException {
      Directory dir = newDirectory();
      IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
      addDocumentWithFields(writer);
      writer.close();
      writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
      IndexReader reader = IndexReader.open(dir, false);
      try {
        reader.deleteDocument(0);
        fail("expected lock");
      } catch(IOException e) {
        // expected exception
      }
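      // forcibly release the write lock held by the open writer so the
      // reader can acquire it for the delete below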
      try {
        IndexWriter.unlock(dir);                // this should not be done in the real world!
      } catch (LockReleaseFailedException lrfe) {
        writer.close();
      }
      reader.deleteDocument(0);
      reader.close();
      writer.close();
      dir.close();
    }

    public void testDocsOutOfOrderJIRA140() throws IOException {
      Directory dir = newDirectory();
      IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
      for(int i=0;i<11;i++) {
        addDoc(writer, "aaa");
      }
      writer.close();
      IndexReader reader = IndexReader.open(dir, false);

      // Try to delete an invalid docId that is nevertheless within the
      // range of the final bits of the BitVector:

      boolean gotException = false;
      try {
        reader.deleteDocument(11);
      } catch (ArrayIndexOutOfBoundsException e) {
        gotException = true;
      }
      reader.close();

      writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));

      // We must add more docs to get a new segment written
      for(int i=0;i<11;i++) {
        addDoc(writer, "aaa");
      }

      // Without the fix for LUCENE-140 this call will
      // [incorrectly] hit a "docs out of order"
      // IllegalStateException because above out-of-bounds
      // deleteDocument corrupted the index:
      writer.optimize();
      writer.close();
      if (!gotException) {
        fail("delete of out-of-bounds doc number failed to hit exception");
      }
      dir.close();
    }

    public void testExceptionReleaseWriteLockJIRA768() throws IOException {

      Directory dir = newDirectory();
      IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
      addDoc(writer, "aaa");
      writer.close();

      IndexReader reader = IndexReader.open(dir, false);
      try {
        reader.deleteDocument(1);
        fail("did not hit exception when deleting an invalid doc number");
      } catch (ArrayIndexOutOfBoundsException e) {
        // expected
      }
      reader.close();
      if (IndexWriter.isLocked(dir)) {
        fail("write lock is still held after close");
      }

      reader = IndexReader.open(dir, false);
      try {
        reader.setNorm(1, "content", (float) 2.0);
        fail("did not hit exception when calling setNorm on an invalid doc number");
      } catch (ArrayIndexOutOfBoundsException e) {
        // expected
      }
      reader.close();
      if (IndexWriter.isLocked(dir)) {
        fail("write lock is still held after close");
      }
      dir.close();
    }

    private String arrayToString(String[] l) {
      StringBuilder s = new StringBuilder();
      for(int i=0;i<l.length;i++) {
        if (i > 0) {
          s.append("\n    ");
        }
        s.append(l[i]);
      }
      return s.toString();
    }

    public void testOpenReaderAfterDelete() throws IOException {
      File dirFile = _TestUtil.getTempDir("deletetest");
      Directory dir = newFSDirectory(dirFile);
      try {
        IndexReader.open(dir, false);
        fail("expected FileNotFoundException");
      } catch (FileNotFoundException e) {
        // expected
      }

      dirFile.delete();

      // Make sure we still get a FileNotFoundException (not an NPE):
      try {
        IndexReader.open(dir, false);
        fail("expected FileNotFoundException");
      } catch (FileNotFoundException e) {
        // expected
      }

      dir.close();
    }

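    // each helper below adds one document; the field names encode how each
    // field is stored and indexed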
    static void addDocumentWithFields(IndexWriter writer) throws IOException
    {
        Document doc = new Document();
        doc.add(newField("keyword","test1", Field.Store.YES, Field.Index.NOT_ANALYZED));
        doc.add(newField("text","test1", Field.Store.YES, Field.Index.ANALYZED));
        doc.add(newField("unindexed","test1", Field.Store.YES, Field.Index.NO));
        doc.add(newField("unstored","test1", Field.Store.NO, Field.Index.ANALYZED));
        writer.addDocument(doc);
    }

    static void addDocumentWithDifferentFields(IndexWriter writer) throws IOException
    {
        Document doc = new Document();
        doc.add(newField("keyword2","test1", Field.Store.YES, Field.Index.NOT_ANALYZED));
        doc.add(newField("text2","test1", Field.Store.YES, Field.Index.ANALYZED));
        doc.add(newField("unindexed2","test1", Field.Store.YES, Field.Index.NO));
        doc.add(newField("unstored2","test1", Field.Store.NO, Field.Index.ANALYZED));
        writer.addDocument(doc);
    }

    static void addDocumentWithTermVectorFields(IndexWriter writer) throws IOException
    {
        Document doc = new Document();
        doc.add(newField("tvnot","tvnot", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
        doc.add(newField("termvector","termvector", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.YES));
        doc.add(newField("tvoffset","tvoffset", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_OFFSETS));
        doc.add(newField("tvposition","tvposition", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS));
        doc.add(newField("tvpositionoffset","tvpositionoffset", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));

        writer.addDocument(doc);
    }

    static void addDoc(IndexWriter writer, String value) throws IOException {
        Document doc = new Document();
        doc.add(newField("content", value, Field.Store.NO, Field.Index.ANALYZED));
        writer.addDocument(doc);
    }

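    // deep-compares two readers: doc counts, field names, norms, deletions,
    // stored fields, and the full term dictionary with positions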
    public static void assertIndexEquals(IndexReader index1, IndexReader index2) throws IOException {
      assertEquals("IndexReaders have different values for numDocs.", index1.numDocs(), index2.numDocs());
      assertEquals("IndexReaders have different values for maxDoc.", index1.maxDoc(), index2.maxDoc());
      assertEquals("Only one IndexReader has deletions.", index1.hasDeletions(), index2.hasDeletions());
      assertEquals("Only one index is optimized.", index1.isOptimized(), index2.isOptimized());

      // check field names
      Collection<String> fields1 = index1.getFieldNames(FieldOption.ALL);
      Collection<String> fields2 = index2.getFieldNames(FieldOption.ALL);
      assertEquals("IndexReaders have different numbers of fields.", fields1.size(), fields2.size());
      Iterator<String> it1 = fields1.iterator();
      Iterator<String> it2 = fields2.iterator();
      while (it1.hasNext()) {
        assertEquals("Different field names.", it1.next(), it2.next());
      }

      // check norms
      it1 = fields1.iterator();
      while (it1.hasNext()) {
        String curField = it1.next();
        byte[] norms1 = index1.norms(curField);
        byte[] norms2 = index2.norms(curField);
        if (norms1 != null && norms2 != null) {
          assertEquals(norms1.length, norms2.length);
          for (int i = 0; i < norms1.length; i++) {
            assertEquals("Norm different for doc " + i + " and field '" + curField + "'.", norms1[i], norms2[i]);
          }
        } else {
          assertSame(norms1, norms2);
        }
      }

      // check deletions
      for (int i = 0; i < index1.maxDoc(); i++) {
        assertEquals("Doc " + i + " only deleted in one index.", index1.isDeleted(i), index2.isDeleted(i));
      }

      // check stored fields
      for (int i = 0; i < index1.maxDoc(); i++) {
        if (!index1.isDeleted(i)) {
          Document doc1 = index1.document(i);
          Document doc2 = index2.document(i);
          List<Fieldable> fieldable1 = doc1.getFields();
          List<Fieldable> fieldable2 = doc2.getFields();
          assertEquals("Different numbers of fields for doc " + i + ".", fieldable1.size(), fieldable2.size());
          Iterator<Fieldable> itField1 = fieldable1.iterator();
          Iterator<Fieldable> itField2 = fieldable2.iterator();
          while (itField1.hasNext()) {
            Field curField1 = (Field) itField1.next();
            Field curField2 = (Field) itField2.next();
            assertEquals("Different field names for doc " + i + ".", curField1.name(), curField2.name());
            assertEquals("Different field values for doc " + i + ".", curField1.stringValue(), curField2.stringValue());
          }
        }
      }

      // check dictionary and posting lists
      TermEnum enum1 = index1.terms();
      TermEnum enum2 = index2.terms();
      TermPositions tp1 = index1.termPositions();
      TermPositions tp2 = index2.termPositions();
      while(enum1.next()) {
        assertTrue(enum2.next());
        assertEquals("Different term in dictionary.", enum1.term(), enum2.term());
        tp1.seek(enum1.term());
        tp2.seek(enum1.term());
        while(tp1.next()) {
          assertTrue(tp2.next());
          assertEquals("Different doc id in posting list of term " + enum1.term() + ".", tp1.doc(), tp2.doc());
          assertEquals("Different term frequency in posting list of term " + enum1.term() + ".", tp1.freq(), tp2.freq());
          for (int i = 0; i < tp1.freq(); i++) {
            assertEquals("Different positions in posting list of term " + enum1.term() + ".", tp1.nextPosition(), tp2.nextPosition());
          }
        }
      }
    }

    public void testGetIndexCommit() throws IOException {

      Directory d = newDirectory();

      // set up writer
      IndexWriter writer = new IndexWriter(
          d,
          newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
              setMaxBufferedDocs(2).
              setMergePolicy(newLogMergePolicy(10))
      );
      for(int i=0;i<27;i++)
        addDocumentWithFields(writer);
      writer.close();

      SegmentInfos sis = new SegmentInfos();
      sis.read(d);
      IndexReader r = IndexReader.open(d, false);
      IndexCommit c = r.getIndexCommit();

      assertEquals(sis.getCurrentSegmentFileName(), c.getSegmentsFileName());

      assertTrue(c.equals(r.getIndexCommit()));

      // Change the index
      writer = new IndexWriter(
          d,
          newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
              setOpenMode(OpenMode.APPEND).
              setMaxBufferedDocs(2).
              setMergePolicy(newLogMergePolicy(10))
      );
      for(int i=0;i<7;i++)
        addDocumentWithFields(writer);
      writer.close();

      IndexReader r2 = r.reopen();
      assertFalse(c.equals(r2.getIndexCommit()));
      assertFalse(r2.getIndexCommit().isOptimized());
      r2.close();

      writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
        new MockAnalyzer(random))
        .setOpenMode(OpenMode.APPEND));
      writer.optimize();
      writer.close();

      r2 = r.reopen();
      assertTrue(r2.getIndexCommit().isOptimized());

      r.close();
      r2.close();
      d.close();
    }

    public void testReadOnly() throws Throwable {
      Directory d = newDirectory();
      IndexWriter writer = new IndexWriter(d, newIndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer(random)));
      addDocumentWithFields(writer);
      writer.commit();
      addDocumentWithFields(writer);
      writer.close();

      IndexReader r = IndexReader.open(d, true);
      try {
        r.deleteDocument(0);
        fail();
      } catch (UnsupportedOperationException uoe) {
        // expected
      }

      writer = new IndexWriter(
          d,
          newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
              setOpenMode(OpenMode.APPEND).
              setMergePolicy(newLogMergePolicy(10))
      );
      addDocumentWithFields(writer);
      writer.close();

      // Make sure reopen is still readonly:
      IndexReader r2 = r.reopen();
      r.close();

      assertFalse(r == r2);

      try {
        r2.deleteDocument(0);
        fail();
      } catch (UnsupportedOperationException uoe) {
        // expected
      }

      writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
        new MockAnalyzer(random))
        .setOpenMode(OpenMode.APPEND));
      writer.optimize();
      writer.close();

      // Make sure reopen to a single segment is still readonly:
      IndexReader r3 = r2.reopen();
      assertFalse(r3 == r2);
      r2.close();

      assertFalse(r == r2);

      try {
        r3.deleteDocument(0);
        fail();
      } catch (UnsupportedOperationException uoe) {
        // expected
      }

      // Make sure write lock isn't held
      writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
          new MockAnalyzer(random))
      .setOpenMode(OpenMode.APPEND));
      writer.close();

      r3.close();
      d.close();
    }


  // LUCENE-1474
  public void testIndexReader() throws Exception {
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer(random)));
    writer.addDocument(createDocument("a"));
    writer.addDocument(createDocument("b"));
    writer.addDocument(createDocument("c"));
    writer.close();
    IndexReader reader = IndexReader.open(dir, false);
    reader.deleteDocuments(new Term("id", "a"));
    reader.flush();
    reader.deleteDocuments(new Term("id", "b"));
    reader.close();
    IndexReader.open(dir, true).close();
    dir.close();
  }

  static Document createDocument(String id) {
    Document doc = new Document();
    doc.add(newField("id", id, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
    return doc;
  }

  // LUCENE-1468 -- make sure on attempting to open an
  // IndexReader on a non-existent directory, you get a
  // good exception
  public void testNoDir() throws Throwable {
    Directory dir = newFSDirectory(_TestUtil.getTempDir("doesnotexist"));
    try {
      IndexReader.open(dir, true);
      fail("did not hit expected exception");
    } catch (NoSuchDirectoryException nsde) {
      // expected
    }
    dir.close();
  }

  // LUCENE-1509
  public void testNoDupCommitFileNames() throws Throwable {

    Directory dir = newDirectory();

    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer(random))
        .setMaxBufferedDocs(2));
    writer.addDocument(createDocument("a"));
    writer.addDocument(createDocument("a"));
    writer.addDocument(createDocument("a"));
    writer.close();

    Collection<IndexCommit> commits = IndexReader.listCommits(dir);
    for (final IndexCommit commit : commits) {
      Collection<String> files = commit.getFileNames();
      HashSet<String> seen = new HashSet<String>();
      for (final String fileName : files) {
        assertTrue("file " + fileName + " was duplicated", !seen.contains(fileName));
        seen.add(fileName);
      }
    }

    dir.close();
  }

  // LUCENE-1579: Ensure that on a cloned reader, segments
  // reuse the doc values arrays in FieldCache
  public void testFieldCacheReuseAfterClone() throws Exception {
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
    Document doc = new Document();
    doc.add(newField("number", "17", Field.Store.NO, Field.Index.NOT_ANALYZED));
    writer.addDocument(doc);
    writer.close();

    // Open reader
    IndexReader r = SegmentReader.getOnlySegmentReader(dir);
    final int[] ints = FieldCache.DEFAULT.getInts(r, "number");
    assertEquals(1, ints.length);
    assertEquals(17, ints[0]);

    // Clone reader
    IndexReader r2 = (IndexReader) r.clone();
    r.close();
    assertTrue(r2 != r);
    final int[] ints2 = FieldCache.DEFAULT.getInts(r2, "number");
    r2.close();

    assertEquals(1, ints2.length);
    assertEquals(17, ints2[0]);
    assertTrue(ints == ints2);

    dir.close();
  }

  // LUCENE-1579: Ensure that on a reopened reader, any
  // shared segments reuse the doc values arrays in
  // FieldCache
  public void testFieldCacheReuseAfterReopen() throws Exception {
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(
        dir,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
            setMergePolicy(newLogMergePolicy(10))
    );
    Document doc = new Document();
    doc.add(newField("number", "17", Field.Store.NO, Field.Index.NOT_ANALYZED));
    ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(10);
    writer.addDocument(doc);
    writer.commit();

    // Open reader1
    IndexReader r = IndexReader.open(dir, false);
    IndexReader r1 = SegmentReader.getOnlySegmentReader(r);
    final int[] ints = FieldCache.DEFAULT.getInts(r1, "number");
    assertEquals(1, ints.length);
    assertEquals(17, ints[0]);

    // Add new segment
    writer.addDocument(doc);
    writer.commit();

    // Reopen reader1 --> reader2
    IndexReader r2 = r.reopen();
    r.close();
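    // the first sub-reader is the segment shared with the pre-reopen reader,
    // so its FieldCache array should be reused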
    IndexReader sub0 = r2.getSequentialSubReaders()[0];
    final int[] ints2 = FieldCache.DEFAULT.getInts(sub0, "number");
    r2.close();
    assertTrue(ints == ints2);

    writer.close();
    dir.close();
  }

  // LUCENE-1586: getUniqueTermCount
  public void testUniqueTermCount() throws Exception {
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
    Document doc = new Document();
    doc.add(newField("field", "a b c d e f g h i j k l m n o p q r s t u v w x y z", Field.Store.NO, Field.Index.ANALYZED));
    doc.add(newField("number", "0 1 2 3 4 5 6 7 8 9", Field.Store.NO, Field.Index.ANALYZED));
    writer.addDocument(doc);
    writer.addDocument(doc);
    writer.commit();

    IndexReader r = IndexReader.open(dir, false);
    IndexReader r1 = SegmentReader.getOnlySegmentReader(r);
    assertEquals(36, r1.getUniqueTermCount());
    writer.addDocument(doc);
    writer.commit();
    IndexReader r2 = r.reopen();
    r.close();
    try {
      r2.getUniqueTermCount();
      fail("expected exception");
    } catch (UnsupportedOperationException uoe) {
      // expected
    }
    IndexReader[] subs = r2.getSequentialSubReaders();
    for(int i=0;i<subs.length;i++) {
      assertEquals(36, subs[i].getUniqueTermCount());
    }
    r2.close();
    writer.close();
    dir.close();
  }

  // LUCENE-1609: don't load terms index
  public void testNoTermsIndex() throws Throwable {
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
    Document doc = new Document();
    doc.add(newField("field", "a b c d e f g h i j k l m n o p q r s t u v w x y z", Field.Store.NO, Field.Index.ANALYZED));
    doc.add(newField("number", "0 1 2 3 4 5 6 7 8 9", Field.Store.NO, Field.Index.ANALYZED));
    writer.addDocument(doc);
    writer.addDocument(doc);
    writer.close();

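    // a termInfosIndexDivisor of -1 tells the reader not to load the terms
    // index at all, as the assertions below verify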
    IndexReader r = IndexReader.open(dir, null, true, -1);
    try {
      r.docFreq(new Term("field", "f"));
      fail("did not hit expected exception");
    } catch (IllegalStateException ise) {
      // expected
    }
    assertFalse(((SegmentReader) r.getSequentialSubReaders()[0]).termsIndexLoaded());

    assertEquals(-1, ((SegmentReader) r.getSequentialSubReaders()[0]).getTermInfosIndexDivisor());
    writer = new IndexWriter(
        dir,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
            setMergePolicy(newLogMergePolicy(10))
    );
    writer.addDocument(doc);
    writer.close();

    // LUCENE-1718: ensure re-open carries over no terms index:
    IndexReader r2 = r.reopen();
    r.close();
    IndexReader[] subReaders = r2.getSequentialSubReaders();
    assertEquals(2, subReaders.length);
    for(int i=0;i<2;i++) {
      assertFalse(((SegmentReader) subReaders[i]).termsIndexLoaded());
    }
    r2.close();
    dir.close();
  }

  // LUCENE-2046
  public void testPrepareCommitIsCurrent() throws Throwable {
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer(random)));
    writer.commit();
    Document doc = new Document();
    writer.addDocument(doc);
    IndexReader r = IndexReader.open(dir, true);
    assertTrue(r.isCurrent());
    writer.addDocument(doc);
    writer.prepareCommit();
    assertTrue(r.isCurrent());
    IndexReader r2 = r.reopen();
    assertTrue(r == r2);
    writer.commit();
    assertFalse(r.isCurrent());
    writer.close();
    r.close();
    dir.close();
  }

  // LUCENE-2753
  public void testListCommits() throws Exception {
    Directory dir = newDirectory();
    SnapshotDeletionPolicy sdp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
        TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setIndexDeletionPolicy(sdp));
    writer.addDocument(new Document());
    writer.commit();
    sdp.snapshot("c1");
    writer.addDocument(new Document());
    writer.commit();
    sdp.snapshot("c2");
    writer.addDocument(new Document());
    writer.commit();
    sdp.snapshot("c3");
    writer.close();
    long currentGen = 0;
    for (IndexCommit ic : IndexReader.listCommits(dir)) {
      assertTrue("currentGen=" + currentGen + " commitGen=" + ic.getGeneration(), currentGen < ic.getGeneration());
      currentGen = ic.getGeneration();
    }
    dir.close();
  }

  // LUCENE-2812
  public void testIndexExists() throws Exception {
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
    writer.addDocument(new Document());
    writer.prepareCommit();
    assertFalse(IndexReader.indexExists(dir));
    writer.close();
    assertTrue(IndexReader.indexExists(dir));
    dir.close();
  }

  // LUCENE-2474
  public void testReaderFinishedListener() throws Exception {
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
    ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(3);
    writer.setInfoStream(VERBOSE ? System.out : null);
    writer.addDocument(new Document());
    writer.commit();
    writer.addDocument(new Document());
    writer.commit();
    final IndexReader reader = writer.getReader();
    final int[] closeCount = new int[1];
    final IndexReader.ReaderFinishedListener listener = new IndexReader.ReaderFinishedListener() {
      public void finished(IndexReader reader) {
        closeCount[0]++;
      }
    };

    reader.addReaderFinishedListener(listener);

    reader.close();

    // Just the top reader
    assertEquals(1, closeCount[0]);
    writer.close();

    // Now also the subs
    assertEquals(3, closeCount[0]);

    IndexReader reader2 = IndexReader.open(dir);
    reader2.addReaderFinishedListener(listener);

    closeCount[0] = 0;
    reader2.close();
    assertEquals(3, closeCount[0]);
    dir.close();
  }

  public void testOOBDocID() throws Exception {
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
    writer.addDocument(new Document());
    IndexReader r = writer.getReader();
    writer.close();
    r.document(0);
    try {
      r.document(1);
      fail("did not hit exception");
    } catch (IllegalArgumentException iae) {
      // expected
    }
    r.close();
    dir.close();
  }
}