1 package org.apache.lucene.index;
4 * Licensed to the Apache Software Foundation (ASF) under one or more
5 * contributor license agreements. See the NOTICE file distributed with
6 * this work for additional information regarding copyright ownership.
7 * The ASF licenses this file to You under the Apache License, Version 2.0
8 * (the "License"); you may not use this file except in compliance with
9 * the License. You may obtain a copy of the License at
11 * http://www.apache.org/licenses/LICENSE-2.0
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
20 import java.io.IOException;
21 import java.util.ArrayList;
22 import java.util.Arrays;
23 import java.util.Collection;
24 import java.util.HashMap;
25 import java.util.HashSet;
27 import java.util.List;
31 import java.util.concurrent.atomic.AtomicInteger;
32 import org.apache.lucene.document.Document;
33 import org.apache.lucene.document.FieldSelector;
34 import org.apache.lucene.search.Similarity;
35 import org.apache.lucene.index.FieldInfo.IndexOptions;
36 import org.apache.lucene.store.BufferedIndexInput;
37 import org.apache.lucene.store.Directory;
38 import org.apache.lucene.store.IndexInput;
39 import org.apache.lucene.util.BitVector;
40 import org.apache.lucene.util.CloseableThreadLocal;
41 import org.apache.lucene.util.StringHelper;
44 * @lucene.experimental
46 public class SegmentReader extends IndexReader implements Cloneable {
47 protected boolean readOnly;
// Metadata for the segment this reader reads.
49 private SegmentInfo si;
50 private int readBufferSize;
// Per-thread clones of the shared stored-fields / term-vectors readers
// (each thread works on its own clone; see FieldsReaderLocal below).
52 CloseableThreadLocal<FieldsReader> fieldsReaderLocal = new FieldsReaderLocal();
53 CloseableThreadLocal<TermVectorsReader> termVectorsLocal = new CloseableThreadLocal<TermVectorsReader>();
// Deleted-docs bitvector plus a reference count shared across clones of
// this reader; doDelete() copies-on-write when the count is > 1.
55 BitVector deletedDocs = null;
56 AtomicInteger deletedDocsRef = null;
// Dirty flags: set when deletes/norms changed in memory but are not yet
// written out by commitChanges().
57 private boolean deletedDocsDirty = false;
58 private boolean normsDirty = false;
60 // TODO: we should move this tracking into SegmentInfo;
61 // this way SegmentInfo.toString shows pending deletes
62 private int pendingDeleteCount;
// Snapshot of the mutable commit state, restored by rollbackCommit()
// when a commit is aborted.
64 private boolean rollbackHasChanges = false;
65 private boolean rollbackDeletedDocsDirty = false;
66 private boolean rollbackNormsDirty = false;
67 private SegmentInfo rollbackSegmentInfo;
68 private int rollbackPendingDeleteCount;
70 // optionally used for the .nrm file shared by multiple norms
71 IndexInput singleNormStream;
72 AtomicInteger singleNormRef;
// Core readers (postings, field infos, stored fields) shared by clones.
74 SegmentCoreReaders core;
77 * Sets the initial value
79 private class FieldsReaderLocal extends CloseableThreadLocal<FieldsReader> {
// Each thread gets its own clone of the shared original FieldsReader.
81 protected FieldsReader initialValue() {
82 return (FieldsReader) core.getFieldsReaderOrig().clone();
// Per-field norms, keyed by field name; populated by openNorms().
86 Map<String,SegmentNorms> norms = new HashMap<String,SegmentNorms>();
89 * @throws CorruptIndexException if the index is corrupt
90 * @throws IOException if there is a low-level IO error
92 public static SegmentReader get(boolean readOnly, SegmentInfo si, int termInfosIndexDivisor) throws CorruptIndexException, IOException {
93 return get(readOnly, si.dir, si, BufferedIndexInput.BUFFER_SIZE, true, termInfosIndexDivisor);
97 * @throws CorruptIndexException if the index is corrupt
98 * @throws IOException if there is a low-level IO error
100 public static SegmentReader get(boolean readOnly,
104 boolean doOpenStores,
105 int termInfosIndexDivisor)
106 throws CorruptIndexException, IOException {
// Pick the subclass by readOnly, then wire up the core readers, doc
// stores, deletions and norms.
107 SegmentReader instance = readOnly ? new ReadOnlySegmentReader() : new SegmentReader();
108 instance.readOnly = readOnly;
110 instance.readBufferSize = readBufferSize;
112 boolean success = false;
115 instance.core = new SegmentCoreReaders(instance, dir, si, readBufferSize, termInfosIndexDivisor);
// NOTE(review): openDocStores appears unconditional here although a
// doOpenStores flag is taken — a guarding line may be elided; confirm.
117 instance.core.openDocStores(si);
119 instance.loadDeletedDocs();
120 instance.openNorms(instance.core.cfsDir, readBufferSize);
124 // With lock-less commits, it's entirely possible (and
125 // fine) to hit a FileNotFound exception above. In
126 // this case, we want to explicitly close any subset
127 // of things that were opened so that we don't have to
128 // wait for a GC to do so.
136 void openDocStores() throws IOException {
137 core.openDocStores(si);
140 private boolean checkDeletedCounts() throws IOException {
// Recount the set bits and assert they agree with both the cached
// BitVector count and the delete count recorded in the SegmentInfo.
141 final int recomputedCount = deletedDocs.getRecomputedCount();
143 assert deletedDocs.count() == recomputedCount : "deleted count=" + deletedDocs.count() + " vs recomputed count=" + recomputedCount;
145 assert si.getDelCount() == recomputedCount :
146 "delete count mismatch: info=" + si.getDelCount() + " vs BitVector=" + recomputedCount;
148 // Verify # deletes does not exceed maxDoc for this
150 assert si.getDelCount() <= maxDoc() :
151 "delete count mismatch: " + recomputedCount + ") exceeds max doc (" + maxDoc() + ") for segment " + si.name;
156 private void loadDeletedDocs() throws IOException {
157 // NOTE: the bitvector is stored using the regular directory, not cfs
158 if (hasDeletions(si)) {
159 deletedDocs = new BitVector(directory(), si.getDelFileName());
// Freshly loaded bitvector starts with one reference (this reader).
160 deletedDocsRef = new AtomicInteger(1);
161 assert checkDeletedCounts();
// The .del file must cover exactly docCount docs, otherwise it
// belongs to a different (corrupt) segment state.
162 if (deletedDocs.size() != si.docCount) {
163 throw new CorruptIndexException("document count mismatch: deleted docs count " + deletedDocs.size() + " vs segment doc count " + si.docCount + " segment=" + si.name);
// No .del file: the SegmentInfo must also report zero deletes.
166 assert si.getDelCount() == 0;
170 * Clones the norm bytes. May be overridden by subclasses. New and experimental.
171 * @param bytes Byte array to clone
172 * @return a new byte array containing a copy of {@code bytes}
174 protected byte[] cloneNormBytes(byte[] bytes) {
175 byte[] cloneBytes = new byte[bytes.length];
176 System.arraycopy(bytes, 0, cloneBytes, 0, bytes.length);
181 * Clones the deleteDocs BitVector. May be overridden by subclasses. New and experimental.
182 * @param bv BitVector to clone
183 * @return New BitVector
185 protected BitVector cloneDeletedDocs(BitVector bv) {
187 return (BitVector)bv.clone();
191 public final synchronized Object clone() {
193 return clone(readOnly); // Preserve current readOnly
194 } catch (Exception ex) {
// Object.clone() cannot declare checked exceptions, so wrap them.
195 throw new RuntimeException(ex);
// Clones this reader into the requested read-only mode by delegating to
// reopenSegment with doClone=true (reuses the current SegmentInfo).
200 public final synchronized IndexReader clone(boolean openReadOnly) throws CorruptIndexException, IOException {
201 return reopenSegment(si, true, openReadOnly);
// Reopen, keeping the current readOnly mode.
205 protected synchronized IndexReader doOpenIfChanged()
206 throws CorruptIndexException, IOException {
207 return reopenSegment(si, false, readOnly);
// Reopen with an explicitly requested readOnly mode.
211 protected synchronized IndexReader doOpenIfChanged(boolean openReadOnly)
212 throws CorruptIndexException, IOException {
213 return reopenSegment(si, false, openReadOnly);
216 synchronized SegmentReader reopenSegment(SegmentInfo si, boolean doClone, boolean openReadOnly) throws CorruptIndexException, IOException {
// Deletions are up to date when both infos agree on whether deletions
// exist and (if so) on the .del file name (i.e. its generation).
218 boolean deletionsUpToDate = (this.si.hasDeletions() == si.hasDeletions())
219 && (!si.hasDeletions() || this.si.getDelFileName().equals(si.getDelFileName()));
220 boolean normsUpToDate = true;
// Track, per field, whether its norms file name changed between infos.
222 boolean[] fieldNormsChanged = new boolean[core.fieldInfos.size()];
223 final int fieldCount = core.fieldInfos.size();
224 for (int i = 0; i < fieldCount; i++) {
225 if (!this.si.getNormFileName(i).equals(si.getNormFileName(i))) {
226 normsUpToDate = false;
227 fieldNormsChanged[i] = true;
231 // if we're cloning we need to run through the reopenSegment logic
232 // also if both old and new readers aren't readonly, we clone to avoid sharing modifications
233 if (normsUpToDate && deletionsUpToDate && !doClone && openReadOnly && readOnly) {
237 // When cloning, the incoming SegmentInfos should not
238 // have any changes in it:
239 assert !doClone || (normsUpToDate && deletionsUpToDate);
242 SegmentReader clone = openReadOnly ? new ReadOnlySegmentReader() : new SegmentReader();
244 boolean success = false;
248 clone.readOnly = openReadOnly;
250 clone.readBufferSize = readBufferSize;
251 clone.pendingDeleteCount = pendingDeleteCount;
252 clone.readerFinishedListeners = readerFinishedListeners;
254 if (!openReadOnly && hasChanges) {
255 // My pending changes transfer to the new reader
256 clone.deletedDocsDirty = deletedDocsDirty;
257 clone.normsDirty = normsDirty;
258 clone.hasChanges = hasChanges;
// Share the deleted-docs bitvector with the clone and bump the ref
// count so a later doDelete() knows to copy-on-write.
263 if (deletedDocs != null) {
264 deletedDocsRef.incrementAndGet();
265 clone.deletedDocs = deletedDocs;
266 clone.deletedDocsRef = deletedDocsRef;
269 if (!deletionsUpToDate) {
// Deletions changed on disk: the clone loads the new .del file.
271 assert clone.deletedDocs == null;
272 clone.loadDeletedDocs();
273 } else if (deletedDocs != null) {
274 deletedDocsRef.incrementAndGet();
275 clone.deletedDocs = deletedDocs;
276 clone.deletedDocsRef = deletedDocsRef;
280 clone.norms = new HashMap<String,SegmentNorms>();
// Carry over norms that did not change (or all norms when cloning).
283 for (int i = 0; i < fieldNormsChanged.length; i++) {
285 // Clone unchanged norms to the cloned reader
286 if (doClone || !fieldNormsChanged[i]) {
287 final String curField = core.fieldInfos.fieldInfo(i).name;
288 SegmentNorms norm = this.norms.get(curField);
290 clone.norms.put(curField, (SegmentNorms) norm.clone());
294 // If we are not cloning, then this will open anew
295 // any norms that have changed:
296 clone.openNorms(si.getUseCompoundFile() ? core.getCFSReader() : directory(), readBufferSize);
301 // An exception occurred during reopen, we have to decRef the norms
302 // that we incRef'ed already and close singleNormsStream and FieldsReader
// Commits pending deletes/norms via commitChanges; the surrounding
// try/finally (rollback on failure) is elided in this listing.
311 protected void doCommit(Map<String,String> commitUserData) throws IOException {
314 boolean success = false;
316 commitChanges(commitUserData);
326 private synchronized void commitChanges(Map<String,String> commitUserData) throws IOException {
327 if (deletedDocsDirty) { // re-write deleted
330 assert deletedDocs.size() == si.docCount;
332 // We can write directly to the actual name (vs to a
333 // .tmp & renaming it) because the file is not live
334 // until segments file is written:
335 final String delFileName = si.getDelFileName();
336 boolean success = false;
338 deletedDocs.write(directory(), delFileName);
// On failure, best-effort delete of the partially written .del file;
// errors here are suppressed so the original exception propagates.
343 directory().deleteFile(delFileName);
344 } catch (Throwable t) {
345 // suppress this so we keep throwing the
346 // original exception
// Fold the pending in-memory deletes into SegmentInfo's delete count.
351 si.setDelCount(si.getDelCount()+pendingDeleteCount);
352 pendingDeleteCount = 0;
353 assert deletedDocs.count() == si.getDelCount(): "delete count mismatch during commit: info=" + si.getDelCount() + " vs BitVector=" + deletedDocs.count();
355 assert pendingDeleteCount == 0;
358 if (normsDirty) { // re-write norms
359 si.setNumFields(core.fieldInfos.size());
360 for (final SegmentNorms norm : norms.values()) {
366 deletedDocsDirty = false;
371 FieldsReader getFieldsReader() {
372 return fieldsReaderLocal.get();
376 protected void doClose() throws IOException {
// Release the per-thread reader clones first.
377 termVectorsLocal.close();
378 fieldsReaderLocal.close();
// Drop this reader's reference on the shared deleted-docs bitvector.
380 if (deletedDocs != null) {
381 deletedDocsRef.decrementAndGet();
382 // null so if an app hangs on to us we still free most ram
// Release this reader's reference on each per-field norm.
386 for (final SegmentNorms norm : norms.values()) {
394 static boolean hasDeletions(SegmentInfo si) throws IOException {
395 // Don't call ensureOpen() here (it could affect performance)
396 return si.hasDeletions();
400 public boolean hasDeletions() {
401 // Don't call ensureOpen() here (it could affect performance)
402 return deletedDocs != null;
405 static boolean usesCompoundFile(SegmentInfo si) throws IOException {
406 return si.getUseCompoundFile();
409 static boolean hasSeparateNorms(SegmentInfo si) throws IOException {
410 return si.hasSeparateNorms();
414 protected void doDelete(int docNum) {
// Lazily allocate the deleted-docs bitvector on the first delete.
415 if (deletedDocs == null) {
416 deletedDocs = new BitVector(maxDoc());
417 deletedDocsRef = new AtomicInteger(1);
419 // there is more than 1 SegmentReader with a reference to this
420 // deletedDocs BitVector so decRef the current deletedDocsRef,
421 // clone the BitVector, create a new deletedDocsRef
422 if (deletedDocsRef.get() > 1) {
423 AtomicInteger oldRef = deletedDocsRef;
424 deletedDocs = cloneDeletedDocs(deletedDocs);
425 deletedDocsRef = new AtomicInteger(1);
426 oldRef.decrementAndGet();
428 deletedDocsDirty = true;
// Count a pending delete only when the bit was not already set.
429 if (!deletedDocs.getAndSet(docNum)) {
430 pendingDeleteCount++;
435 protected void doUndeleteAll() {
436 deletedDocsDirty = false;
437 if (deletedDocs != null) {
438 assert deletedDocsRef != null;
// Drop our reference and discard the bitvector and pending count.
439 deletedDocsRef.decrementAndGet();
441 deletedDocsRef = null;
442 pendingDeleteCount = 0;
// No deletions existed: the invariants below must already hold.
446 assert deletedDocsRef == null;
447 assert pendingDeleteCount == 0;
451 List<String> files() throws IOException {
452 return new ArrayList<String>(si.files());
456 public TermEnum terms() {
458 return core.getTermsReader().terms();
462 public TermEnum terms(Term t) throws IOException {
464 return core.getTermsReader().terms(t);
467 FieldInfos fieldInfos() {
468 return core.fieldInfos;
472 public Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
474 if (n < 0 || n >= maxDoc()) {
475 throw new IllegalArgumentException("docID must be >= 0 and < maxDoc=" + maxDoc() + " (got docID=" + n + ")");
477 return getFieldsReader().doc(n, fieldSelector);
481 public synchronized boolean isDeleted(int n) {
482 return (deletedDocs != null && deletedDocs.get(n));
486 public TermDocs termDocs(Term term) throws IOException {
488 return new AllTermDocs(this);
490 return super.termDocs(term);
495 public TermDocs termDocs() throws IOException {
497 return new SegmentTermDocs(this);
501 public TermPositions termPositions() throws IOException {
503 return new SegmentTermPositions(this);
507 public int docFreq(Term t) throws IOException {
509 TermInfo ti = core.getTermsReader().get(t);
517 public int numDocs() {
518 // Don't call ensureOpen() here (it could affect performance)
520 if (deletedDocs != null)
521 n -= deletedDocs.count();
526 public int maxDoc() {
527 // Don't call ensureOpen() here (it could affect performance)
532 * @see IndexReader#getFieldNames(org.apache.lucene.index.IndexReader.FieldOption)
535 public Collection<String> getFieldNames(IndexReader.FieldOption fieldOption) {
538 Set<String> fieldSet = new HashSet<String>();
539 for (int i = 0; i < core.fieldInfos.size(); i++) {
540 FieldInfo fi = core.fieldInfos.fieldInfo(i);
541 if (fieldOption == IndexReader.FieldOption.ALL) {
542 fieldSet.add(fi.name);
544 else if (!fi.isIndexed && fieldOption == IndexReader.FieldOption.UNINDEXED) {
545 fieldSet.add(fi.name);
547 else if (fi.indexOptions == IndexOptions.DOCS_ONLY && fieldOption == IndexReader.FieldOption.OMIT_TERM_FREQ_AND_POSITIONS) {
548 fieldSet.add(fi.name);
550 else if (fi.indexOptions == IndexOptions.DOCS_AND_FREQS && fieldOption == IndexReader.FieldOption.OMIT_POSITIONS) {
551 fieldSet.add(fi.name);
553 else if (fi.storePayloads && fieldOption == IndexReader.FieldOption.STORES_PAYLOADS) {
554 fieldSet.add(fi.name);
556 else if (fi.isIndexed && fieldOption == IndexReader.FieldOption.INDEXED) {
557 fieldSet.add(fi.name);
559 else if (fi.isIndexed && fi.storeTermVector == false && fieldOption == IndexReader.FieldOption.INDEXED_NO_TERMVECTOR) {
560 fieldSet.add(fi.name);
562 else if (fi.storeTermVector == true &&
563 fi.storePositionWithTermVector == false &&
564 fi.storeOffsetWithTermVector == false &&
565 fieldOption == IndexReader.FieldOption.TERMVECTOR) {
566 fieldSet.add(fi.name);
568 else if (fi.isIndexed && fi.storeTermVector && fieldOption == IndexReader.FieldOption.INDEXED_WITH_TERMVECTOR) {
569 fieldSet.add(fi.name);
571 else if (fi.storePositionWithTermVector && fi.storeOffsetWithTermVector == false && fieldOption == IndexReader.FieldOption.TERMVECTOR_WITH_POSITION) {
572 fieldSet.add(fi.name);
574 else if (fi.storeOffsetWithTermVector && fi.storePositionWithTermVector == false && fieldOption == IndexReader.FieldOption.TERMVECTOR_WITH_OFFSET) {
575 fieldSet.add(fi.name);
577 else if ((fi.storeOffsetWithTermVector && fi.storePositionWithTermVector) &&
578 fieldOption == IndexReader.FieldOption.TERMVECTOR_WITH_POSITION_OFFSET) {
579 fieldSet.add(fi.name);
586 public boolean hasNorms(String field) {
588 return norms.containsKey(field);
592 public byte[] norms(String field) throws IOException {
594 final SegmentNorms norm = norms.get(field);
596 // not indexed, or norms not stored
603 protected void doSetNorm(int doc, String field, byte value)
605 SegmentNorms norm = norms.get(field);
607 // field does not store norms
608 throw new IllegalStateException("Cannot setNorm for field " + field + ": norms were omitted");
612 norm.copyOnWrite()[doc] = value; // set the value
615 /** Read norms into a pre-allocated array. */
617 public synchronized void norms(String field, byte[] bytes, int offset)
621 SegmentNorms norm = norms.get(field);
623 Arrays.fill(bytes, offset, bytes.length, Similarity.getDefault().encodeNormValue(1.0f));
627 norm.bytes(bytes, offset, maxDoc());
631 /** @lucene.internal */
632 int getPostingsSkipInterval() {
633 return core.getTermsReader().getSkipInterval();
// Opens the norms for every indexed, non-omitNorms field, sharing one
// IndexInput for norms stored in the single .nrm file.
636 private void openNorms(Directory cfsDir, int readBufferSize) throws IOException {
637 long nextNormSeek = SegmentNorms.NORMS_HEADER.length; //skip header (header unused for now)
638 int maxDoc = maxDoc();
639 for (int i = 0; i < core.fieldInfos.size(); i++) {
640 FieldInfo fi = core.fieldInfos.fieldInfo(i);
641 if (norms.containsKey(fi.name)) {
642 // in case this SegmentReader is being re-opened, we might be able to
643 // reuse some norm instances and skip loading them here
646 if (fi.isIndexed && !fi.omitNorms) {
647 Directory d = directory();
648 String fileName = si.getNormFileName(fi.number);
649 if (!si.hasSeparateNorms(fi.number)) {
653 // singleNormFile means multiple norms share this file
654 boolean singleNormFile = IndexFileNames.matchesExtension(fileName, IndexFileNames.NORMS_EXTENSION);
655 IndexInput normInput = null;
658 if (singleNormFile) {
659 normSeek = nextNormSeek;
// Open the shared .nrm stream once; later fields just add a ref.
660 if (singleNormStream == null) {
661 singleNormStream = d.openInput(fileName, readBufferSize);
662 singleNormRef = new AtomicInteger(1);
664 singleNormRef.incrementAndGet();
666 // All norms in the .nrm file can share a single IndexInput since
667 // they are only used in a synchronized context.
668 // If this were to change in the future, a clone could be done here.
669 normInput = singleNormStream;
// Separate norms file: this field opens its own stream.
671 normInput = d.openInput(fileName);
672 // if the segment was created in 3.2 or after, we wrote the header for sure,
673 // and don't need to do the sketchy file size check. otherwise, we check
674 // if the size is exactly equal to maxDoc to detect a headerless file.
675 // NOTE: remove this check in Lucene 5.0!
676 String version = si.getVersion();
677 final boolean isUnversioned =
678 (version == null || StringHelper.getVersionComparator().compare(version, "3.2") < 0)
679 && normInput.length() == maxDoc();
683 normSeek = SegmentNorms.NORMS_HEADER.length;
687 norms.put(fi.name, new SegmentNorms(normInput, fi.number, normSeek, this));
688 nextNormSeek += maxDoc; // increment also if some norms are separate
693 boolean termsIndexLoaded() {
694 return core.termsIndexIsLoaded();
697 // NOTE: only called from IndexWriter when a near
698 // real-time reader is opened, or applyDeletes is run,
699 // sharing a segment that's still being merged. This
700 // method is not thread safe, and relies on the
701 // synchronization in IndexWriter
702 void loadTermsIndex(int termsIndexDivisor) throws IOException {
703 core.loadTermsIndex(si, termsIndexDivisor);
707 boolean normsClosed() {
708 if (singleNormStream != null) {
711 for (final SegmentNorms norm : norms.values()) {
712 if (norm.refCount > 0) {
720 boolean normsClosed(String field) {
721 return norms.get(field).refCount == 0;
725 * Create a clone from the initial TermVectorsReader and store it in the ThreadLocal.
726 * @return TermVectorsReader
728 TermVectorsReader getTermVectorsReader() {
729 TermVectorsReader tvReader = termVectorsLocal.get();
730 if (tvReader == null) {
731 TermVectorsReader orig = core.getTermVectorsReaderOrig();
736 tvReader = (TermVectorsReader) orig.clone();
737 } catch (CloneNotSupportedException cnse) {
741 termVectorsLocal.set(tvReader);
746 TermVectorsReader getTermVectorsReaderOrig() {
747 return core.getTermVectorsReaderOrig();
750 /** Return a term frequency vector for the specified document and field. The
751 * vector returned contains term numbers and frequencies for all terms in
752 * the specified field of this document, if the field had storeTermVector
753 * flag set. If the flag was not set, the method returns null.
754 * @throws IOException
757 public TermFreqVector getTermFreqVector(int docNumber, String field) throws IOException {
758 // Check if this field is invalid or has no stored term vector
760 FieldInfo fi = core.fieldInfos.fieldInfo(field);
761 if (fi == null || !fi.storeTermVector)
764 TermVectorsReader termVectorsReader = getTermVectorsReader();
765 if (termVectorsReader == null)
768 return termVectorsReader.get(docNumber, field);
773 public void getTermFreqVector(int docNumber, String field, TermVectorMapper mapper) throws IOException {
775 FieldInfo fi = core.fieldInfos.fieldInfo(field);
776 if (fi == null || !fi.storeTermVector)
779 TermVectorsReader termVectorsReader = getTermVectorsReader();
780 if (termVectorsReader == null) {
785 termVectorsReader.get(docNumber, field, mapper);
790 public void getTermFreqVector(int docNumber, TermVectorMapper mapper) throws IOException {
793 TermVectorsReader termVectorsReader = getTermVectorsReader();
794 if (termVectorsReader == null)
797 termVectorsReader.get(docNumber, mapper);
800 /** Return an array of term frequency vectors for the specified document.
801 * The array contains a vector for each vectorized field in the document.
802 * Each vector vector contains term numbers and frequencies for all terms
803 * in a given vectorized field.
804 * If no such fields existed, the method returns null.
805 * @throws IOException
808 public TermFreqVector[] getTermFreqVectors(int docNumber) throws IOException {
811 TermVectorsReader termVectorsReader = getTermVectorsReader();
812 if (termVectorsReader == null)
815 return termVectorsReader.get(docNumber);
820 public String toString() {
821 final StringBuilder buffer = new StringBuilder();
825 buffer.append(si.toString(core.dir, pendingDeleteCount));
826 return buffer.toString();
830 * Return the name of the segment this reader is reading.
832 public String getSegmentName() {
837 * Return the SegmentInfo of the segment this reader is reading.
839 SegmentInfo getSegmentInfo() {
843 void setSegmentInfo(SegmentInfo info) {
848 rollbackSegmentInfo = (SegmentInfo) si.clone();
849 rollbackHasChanges = hasChanges;
850 rollbackDeletedDocsDirty = deletedDocsDirty;
851 rollbackNormsDirty = normsDirty;
852 rollbackPendingDeleteCount = pendingDeleteCount;
853 for (SegmentNorms norm : norms.values()) {
854 norm.rollbackDirty = norm.dirty;
// Restores the commit-related state snapshotted into the rollback*
// fields, undoing an aborted commit (including per-norm dirty flags).
858 void rollbackCommit() {
859 si.reset(rollbackSegmentInfo);
860 hasChanges = rollbackHasChanges;
861 deletedDocsDirty = rollbackDeletedDocsDirty;
862 normsDirty = rollbackNormsDirty;
863 pendingDeleteCount = rollbackPendingDeleteCount;
864 for (SegmentNorms norm : norms.values()) {
865 norm.dirty = norm.rollbackDirty;
869 /** Returns the directory this index resides in. */
871 public Directory directory() {
872 // Don't ensureOpen here -- in certain cases, when a
873 // cloned/reopened reader needs to commit, it may call
874 // this method on the closed original reader
878 // This is necessary so that cloned SegmentReaders (which
879 // share the underlying postings data) will map to the
880 // same entry in the FieldCache. See LUCENE-1579.
882 public final Object getCoreCacheKey() {
883 return core.freqStream;
887 public Object getDeletesCacheKey() {
892 public long getUniqueTermCount() {
893 return core.getTermsReader().size();
897 * Lotsa tests did hacks like:<br/>
898 * SegmentReader reader = (SegmentReader) IndexReader.open(dir);<br/>
899 * They broke. This method serves as a hack to keep hacks working
900 * We do it with R/W access for the tests (BW compatibility)
901 * @deprecated Remove this when tests are fixed!
904 static SegmentReader getOnlySegmentReader(Directory dir) throws IOException {
905 return getOnlySegmentReader(IndexReader.open(dir,false));
// Unwraps a reader to its single underlying SegmentReader; throws
// IllegalArgumentException unless the reader covers exactly one segment.
908 static SegmentReader getOnlySegmentReader(IndexReader reader) {
909 if (reader instanceof SegmentReader)
910 return (SegmentReader) reader;
912 if (reader instanceof DirectoryReader) {
913 IndexReader[] subReaders = reader.getSequentialSubReaders();
914 if (subReaders.length != 1)
915 throw new IllegalArgumentException(reader + " has " + subReaders.length + " segments instead of exactly one");
917 return (SegmentReader) subReaders[0];
920 throw new IllegalArgumentException(reader + " is not a SegmentReader or a single-segment DirectoryReader");
924 public int getTermInfosIndexDivisor() {
925 return core.termsIndexDivisor;
929 protected void readerFinished() {
930 // Do nothing here -- we have more careful control on
931 // when to notify that a SegmentReader has finished,
932 // because a given core is shared across many cloned
933 // SegmentReaders. We only notify once that core is no
934 // longer used (all SegmentReaders sharing it have been