1 package org.apache.lucene.index;
4 * Licensed to the Apache Software Foundation (ASF) under one or more
5 * contributor license agreements. See the NOTICE file distributed with
6 * this work for additional information regarding copyright ownership.
7 * The ASF licenses this file to You under the Apache License, Version 2.0
8 * (the "License"); you may not use this file except in compliance with
9 * the License. You may obtain a copy of the License at
11 * http://www.apache.org/licenses/LICENSE-2.0
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
20 import java.io.IOException;
21 import java.util.ArrayList;
22 import java.util.Arrays;
23 import java.util.Collection;
24 import java.util.HashMap;
25 import java.util.HashSet;
27 import java.util.List;
31 import java.util.concurrent.atomic.AtomicInteger;
32 import org.apache.lucene.document.Document;
33 import org.apache.lucene.document.FieldSelector;
34 import org.apache.lucene.search.Similarity;
35 import org.apache.lucene.index.FieldInfo.IndexOptions;
36 import org.apache.lucene.store.BufferedIndexInput;
37 import org.apache.lucene.store.Directory;
38 import org.apache.lucene.store.IndexInput;
39 import org.apache.lucene.util.BitVector;
40 import org.apache.lucene.util.CloseableThreadLocal;
41 import org.apache.lucene.util.StringHelper;
44 * @lucene.experimental
46 public class SegmentReader extends IndexReader implements Cloneable {
// NOTE(review): this chunk is a partially-sampled copy of Lucene 3.x
// SegmentReader — stray original line numbers are embedded in each line and
// many interior lines are missing. Code is left byte-identical; comments
// describe only what the visible code shows.
// True when this reader was opened read-only (deletes/norm updates disallowed).
47 protected boolean readOnly;
// Metadata for the segment this reader reads; snapshotted/reset on rollback.
49 private SegmentInfo si;
50 private int readBufferSize;
// Per-thread FieldsReader clone (stored-fields access is not thread-safe).
52 CloseableThreadLocal<FieldsReader> fieldsReaderLocal = new FieldsReaderLocal();
// Per-thread TermVectorsReader clone, populated lazily in getTermVectorsReader().
53 CloseableThreadLocal<TermVectorsReader> termVectorsLocal = new CloseableThreadLocal<TermVectorsReader>();
// Deleted-docs bitset, ref-counted and shared across clones; null = no deletions.
55 BitVector deletedDocs = null;
56 AtomicInteger deletedDocsRef = null;
// Dirty flags: in-memory deletes/norms diverge from what is on disk.
57 private boolean deletedDocsDirty = false;
58 private boolean normsDirty = false;
60 // TODO: we should move this tracking into SegmentInfo;
61 // this way SegmentInfo.toString shows pending deletes
// Deletes buffered in deletedDocs but not yet committed to the segment.
62 private int pendingDeleteCount;
// Snapshot taken by startCommit() and restored by rollbackCommit().
64 private boolean rollbackHasChanges = false;
65 private boolean rollbackDeletedDocsDirty = false;
66 private boolean rollbackNormsDirty = false;
67 private SegmentInfo rollbackSegmentInfo;
68 private int rollbackPendingDeleteCount;
70 // optionally used for the .nrm file shared by multiple norms
71 IndexInput singleNormStream;
72 AtomicInteger singleNormRef;
// Core readers (postings, fields, vectors) shared by all clones of this segment.
74 SegmentCoreReaders core;
77 * Sets the initial value
// Thread-local that clones the shared FieldsReader on first use per thread.
79 private class FieldsReaderLocal extends CloseableThreadLocal<FieldsReader> {
81 protected FieldsReader initialValue() {
82 return (FieldsReader) core.getFieldsReaderOrig().clone();
// Per-field norms, keyed by field name; rebuilt on reopen/clone.
86 Map<String,SegmentNorms> norms = new HashMap<String,SegmentNorms>();
// Convenience factory: opens the segment from its own directory with the
// default buffer size and doc stores opened eagerly.
89 * @throws CorruptIndexException if the index is corrupt
90 * @throws IOException if there is a low-level IO error
92 public static SegmentReader get(boolean readOnly, SegmentInfo si, int termInfosIndexDivisor) throws CorruptIndexException, IOException {
93 return get(readOnly, si.dir, si, BufferedIndexInput.BUFFER_SIZE, true, termInfosIndexDivisor);
// Full factory: builds a (ReadOnly)SegmentReader, wires up the shared core,
// loads deleted docs and norms. NOTE(review): the try/catch/finally that the
// comment at the bottom refers to is not visible in this sampled chunk.
97 * @throws CorruptIndexException if the index is corrupt
98 * @throws IOException if there is a low-level IO error
100 public static SegmentReader get(boolean readOnly,
104 boolean doOpenStores,
105 int termInfosIndexDivisor)
106 throws CorruptIndexException, IOException {
107 SegmentReader instance = readOnly ? new ReadOnlySegmentReader() : new SegmentReader();
108 instance.readOnly = readOnly;
110 instance.readBufferSize = readBufferSize;
// success flag presumably drives cleanup in a finally block (not visible here).
112 boolean success = false;
115 instance.core = new SegmentCoreReaders(instance, dir, si, readBufferSize, termInfosIndexDivisor);
117 instance.core.openDocStores(si);
119 instance.loadDeletedDocs();
120 instance.openNorms(instance.core.cfsDir, readBufferSize);
124 // With lock-less commits, it's entirely possible (and
125 // fine) to hit a FileNotFound exception above. In
126 // this case, we want to explicitly close any subset
127 // of things that were opened so that we don't have to
128 // wait for a GC to do so.
// Opens the shared doc stores (stored fields / term vectors) for this segment.
136 void openDocStores() throws IOException {
137 core.openDocStores(si);
// Assert-only consistency checks between the BitVector's counts and the
// SegmentInfo's recorded delete count; always returns under -ea (asserts).
140 private boolean checkDeletedCounts() throws IOException {
141 final int recomputedCount = deletedDocs.getRecomputedCount();
143 assert deletedDocs.count() == recomputedCount : "deleted count=" + deletedDocs.count() + " vs recomputed count=" + recomputedCount;
145 assert si.getDelCount() == recomputedCount :
146 "delete count mismatch: info=" + si.getDelCount() + " vs BitVector=" + recomputedCount;
148 // Verify # deletes does not exceed maxDoc for this
150 assert si.getDelCount() <= maxDoc() :
151 "delete count mismatch: " + recomputedCount + ") exceeds max doc (" + maxDoc() + ") for segment " + si.name;
// Loads the per-segment deletions file into deletedDocs (ref-count = 1) if
// the segment has deletions; otherwise asserts the recorded count is zero.
156 private void loadDeletedDocs() throws IOException {
157 // NOTE: the bitvector is stored using the regular directory, not cfs
158 if (hasDeletions(si)) {
159 deletedDocs = new BitVector(directory(), si.getDelFileName());
160 deletedDocsRef = new AtomicInteger(1);
161 assert checkDeletedCounts();
// BitVector size must match the segment's doc count or the index is corrupt.
162 if (deletedDocs.size() != si.docCount) {
163 throw new CorruptIndexException("document count mismatch: deleted docs count " + deletedDocs.size() + " vs segment doc count " + si.docCount + " segment=" + si.name);
166 assert si.getDelCount() == 0;
170 * Clones the norm bytes. May be overridden by subclasses. New and experimental.
171 * @param bytes Byte array to clone
172 * @return New BitVector
// Deep-copies the norms byte array (copy-on-write support for setNorm).
174 protected byte[] cloneNormBytes(byte[] bytes) {
175 byte[] cloneBytes = new byte[bytes.length];
176 System.arraycopy(bytes, 0, cloneBytes, 0, bytes.length);
181 * Clones the deleteDocs BitVector. May be overridden by subclasses. New and experimental.
182 * @param bv BitVector to clone
183 * @return New BitVector
185 protected BitVector cloneDeletedDocs(BitVector bv) {
186 return (BitVector)bv.clone();
// Object.clone override: delegates to clone(readOnly), wrapping checked
// exceptions since Object.clone cannot throw IOException.
190 public final synchronized Object clone() {
192 return clone(readOnly); // Preserve current readOnly
193 } catch (Exception ex) {
194 throw new RuntimeException(ex);
// Clone with an explicit read-only mode; same segment info, forced clone path.
199 public final synchronized IndexReader clone(boolean openReadOnly) throws CorruptIndexException, IOException {
200 return reopenSegment(si, true, openReadOnly);
// reopen(): returns this reader (or a new one) reflecting the latest segment
// state, preserving the current read-only mode.
204 public synchronized IndexReader reopen()
205 throws CorruptIndexException, IOException {
206 return reopenSegment(si, false, readOnly);
210 public synchronized IndexReader reopen(boolean openReadOnly)
211 throws CorruptIndexException, IOException {
212 return reopenSegment(si, false, openReadOnly);
// Core of clone()/reopen(): builds a new SegmentReader sharing this one's
// core, re-sharing or re-loading deletions and norms depending on whether
// they changed on disk and whether this is a forced clone.
// NOTE(review): several interior lines (early return, try/finally, incRef of
// core) are missing from this sampled chunk; comments cover visible code only.
215 synchronized SegmentReader reopenSegment(SegmentInfo si, boolean doClone, boolean openReadOnly) throws CorruptIndexException, IOException {
// Deletions are up to date if both infos agree on having deletions and, when
// present, point at the same deletions file.
216 boolean deletionsUpToDate = (this.si.hasDeletions() == si.hasDeletions())
217 && (!si.hasDeletions() || this.si.getDelFileName().equals(si.getDelFileName()));
218 boolean normsUpToDate = true;
// Detect per-field norm file changes (separate-norms generation bumps).
220 boolean[] fieldNormsChanged = new boolean[core.fieldInfos.size()];
221 final int fieldCount = core.fieldInfos.size();
222 for (int i = 0; i < fieldCount; i++) {
223 if (!this.si.getNormFileName(i).equals(si.getNormFileName(i))) {
224 normsUpToDate = false;
225 fieldNormsChanged[i] = true;
229 // if we're cloning we need to run through the reopenSegment logic
230 // also if both old and new readers aren't readonly, we clone to avoid sharing modifications
// Nothing changed and both readers are read-only: presumably returns `this`
// here (body of this branch not visible in chunk).
231 if (normsUpToDate && deletionsUpToDate && !doClone && openReadOnly && readOnly) {
235 // When cloning, the incoming SegmentInfos should not
236 // have any changes in it:
237 assert !doClone || (normsUpToDate && deletionsUpToDate);
240 SegmentReader clone = openReadOnly ? new ReadOnlySegmentReader() : new SegmentReader();
242 boolean success = false;
246 clone.readOnly = openReadOnly;
248 clone.readBufferSize = readBufferSize;
249 clone.pendingDeleteCount = pendingDeleteCount;
250 clone.readerFinishedListeners = readerFinishedListeners;
251 
252 if (!openReadOnly && hasChanges) {
253 // My pending changes transfer to the new reader
254 clone.deletedDocsDirty = deletedDocsDirty;
255 clone.normsDirty = normsDirty;
256 clone.hasChanges = hasChanges;
// Clone path: share the existing deletedDocs BitVector, bumping its refcount.
261 if (deletedDocs != null) {
262 deletedDocsRef.incrementAndGet();
263 clone.deletedDocs = deletedDocs;
264 clone.deletedDocsRef = deletedDocsRef;
// Reopen path: reload deletions from disk if stale, else share as above.
267 if (!deletionsUpToDate) {
269 assert clone.deletedDocs == null;
270 clone.loadDeletedDocs();
271 } else if (deletedDocs != null) {
272 deletedDocsRef.incrementAndGet();
273 clone.deletedDocs = deletedDocs;
274 clone.deletedDocsRef = deletedDocsRef;
278 clone.norms = new HashMap<String,SegmentNorms>();
281 for (int i = 0; i < fieldNormsChanged.length; i++) {
283 // Clone unchanged norms to the cloned reader
284 if (doClone || !fieldNormsChanged[i]) {
285 final String curField = core.fieldInfos.fieldInfo(i).name;
286 SegmentNorms norm = this.norms.get(curField);
288 clone.norms.put(curField, (SegmentNorms) norm.clone());
292 // If we are not cloning, then this will open anew
293 // any norms that have changed:
294 clone.openNorms(si.getUseCompoundFile() ? core.getCFSReader() : directory(), readBufferSize);
299 // An exception occurred during reopen, we have to decRef the norms
300 // that we incRef'ed already and close singleNormsStream and FieldsReader
// Commits pending in-memory changes (deletes/norms) to the directory.
// NOTE(review): the finally/cleanup lines are missing from this chunk.
309 protected void doCommit(Map<String,String> commitUserData) throws IOException {
312 boolean success = false;
314 commitChanges(commitUserData);
// Writes dirty deletions to the .del file and dirty norms to their files,
// then clears the dirty flags and folds pendingDeleteCount into SegmentInfo.
324 private synchronized void commitChanges(Map<String,String> commitUserData) throws IOException {
325 if (deletedDocsDirty) { // re-write deleted
328 assert deletedDocs.size() == si.docCount;
330 // We can write directly to the actual name (vs to a
331 // .tmp & renaming it) because the file is not live
332 // until segments file is written:
333 final String delFileName = si.getDelFileName();
334 boolean success = false;
336 deletedDocs.write(directory(), delFileName);
// On failure, best-effort delete of the partially written file; the original
// exception keeps propagating.
341 directory().deleteFile(delFileName);
342 } catch (Throwable t) {
343 // suppress this so we keep throwing the
344 // original exception
349 si.setDelCount(si.getDelCount()+pendingDeleteCount);
350 pendingDeleteCount = 0;
351 assert deletedDocs.count() == si.getDelCount(): "delete count mismatch during commit: info=" + si.getDelCount() + " vs BitVector=" + deletedDocs.count();
353 assert pendingDeleteCount == 0;
356 if (normsDirty) { // re-write norms
357 si.setNumFields(core.fieldInfos.size());
358 for (final SegmentNorms norm : norms.values()) {
364 deletedDocsDirty = false;
// Returns this thread's FieldsReader clone for stored-field access.
369 FieldsReader getFieldsReader() {
370 return fieldsReaderLocal.get();
// Releases per-thread readers, drops our ref on the shared deletedDocs, and
// decrefs norms. NOTE(review): core.decRef and nulling of fields are not
// visible in this sampled chunk.
374 protected void doClose() throws IOException {
375 termVectorsLocal.close();
376 fieldsReaderLocal.close();
378 if (deletedDocs != null) {
379 deletedDocsRef.decrementAndGet();
380 // null so if an app hangs on to us we still free most ram
384 for (final SegmentNorms norm : norms.values()) {
// Static variant: checks the SegmentInfo directly, without opening a reader.
392 static boolean hasDeletions(SegmentInfo si) throws IOException {
393 // Don't call ensureOpen() here (it could affect performance)
394 return si.hasDeletions();
// Instance variant: true when an in-memory deletions bitset exists.
398 public boolean hasDeletions() {
399 // Don't call ensureOpen() here (it could affect performance)
400 return deletedDocs != null;
403 static boolean usesCompoundFile(SegmentInfo si) throws IOException {
404 return si.getUseCompoundFile();
407 static boolean hasSeparateNorms(SegmentInfo si) throws IOException {
408 return si.hasSeparateNorms();
// Marks one document deleted. Lazily allocates the bitset, and copy-on-writes
// it when other readers share it (refcount > 1) so their view is unchanged.
412 protected void doDelete(int docNum) {
413 if (deletedDocs == null) {
414 deletedDocs = new BitVector(maxDoc());
415 deletedDocsRef = new AtomicInteger(1);
417 // there is more than 1 SegmentReader with a reference to this
418 // deletedDocs BitVector so decRef the current deletedDocsRef,
419 // clone the BitVector, create a new deletedDocsRef
420 if (deletedDocsRef.get() > 1) {
421 AtomicInteger oldRef = deletedDocsRef;
422 deletedDocs = cloneDeletedDocs(deletedDocs);
423 deletedDocsRef = new AtomicInteger(1);
424 oldRef.decrementAndGet();
426 deletedDocsDirty = true;
// getAndSet returns previous bit: only count docs newly deleted.
427 if (!deletedDocs.getAndSet(docNum)) {
428 pendingDeleteCount++;
// Drops all in-memory deletions (bitset + pending count + dirty flag).
433 protected void doUndeleteAll() {
434 deletedDocsDirty = false;
435 if (deletedDocs != null) {
436 assert deletedDocsRef != null;
437 deletedDocsRef.decrementAndGet();
439 deletedDocsRef = null;
440 pendingDeleteCount = 0;
444 assert deletedDocsRef == null;
445 assert pendingDeleteCount == 0;
// Snapshot of this segment's file names (defensive copy).
449 List<String> files() throws IOException {
450 return new ArrayList<String>(si.files());
// Term enumeration over the whole segment (delegates to the shared core).
454 public TermEnum terms() {
456 return core.getTermsReader().terms();
// Term enumeration positioned at (or after) the given term.
460 public TermEnum terms(Term t) throws IOException {
462 return core.getTermsReader().terms(t);
465 FieldInfos fieldInfos() {
466 return core.fieldInfos;
// Loads a stored document by id; bounds-checks against maxDoc first.
470 public Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
472 if (n < 0 || n >= maxDoc()) {
473 throw new IllegalArgumentException("docID must be >= 0 and < maxDoc=" + maxDoc() + " (got docID=" + n + ")");
475 return getFieldsReader().doc(n, fieldSelector);
// Synchronized: deletedDocs may be swapped by doDelete's copy-on-write.
479 public synchronized boolean isDeleted(int n) {
480 return (deletedDocs != null && deletedDocs.get(n));
// null term presumably means match-all (AllTermDocs); branch condition for
// that case is not visible in this sampled chunk.
484 public TermDocs termDocs(Term term) throws IOException {
486 return new AllTermDocs(this);
488 return super.termDocs(term);
493 public TermDocs termDocs() throws IOException {
495 return new SegmentTermDocs(this);
499 public TermPositions termPositions() throws IOException {
501 return new SegmentTermPositions(this);
// Document frequency of a term; 0-return path for a missing term is not
// visible in this chunk.
505 public int docFreq(Term t) throws IOException {
507 TermInfo ti = core.getTermsReader().get(t);
// Live-doc count: maxDoc minus deleted docs.
515 public int numDocs() {
516 // Don't call ensureOpen() here (it could affect performance)
518 if (deletedDocs != null)
519 n -= deletedDocs.count();
524 public int maxDoc() {
525 // Don't call ensureOpen() here (it could affect performance)
// Collects the names of fields matching the requested FieldOption by scanning
// the segment's FieldInfos once; each branch maps one option to its predicate.
530 * @see IndexReader#getFieldNames(org.apache.lucene.index.IndexReader.FieldOption)
533 public Collection<String> getFieldNames(IndexReader.FieldOption fieldOption) {
536 Set<String> fieldSet = new HashSet<String>();
537 for (int i = 0; i < core.fieldInfos.size(); i++) {
538 FieldInfo fi = core.fieldInfos.fieldInfo(i);
539 if (fieldOption == IndexReader.FieldOption.ALL) {
540 fieldSet.add(fi.name);
542 else if (!fi.isIndexed && fieldOption == IndexReader.FieldOption.UNINDEXED) {
543 fieldSet.add(fi.name);
545 else if (fi.indexOptions == IndexOptions.DOCS_ONLY && fieldOption == IndexReader.FieldOption.OMIT_TERM_FREQ_AND_POSITIONS) {
546 fieldSet.add(fi.name);
548 else if (fi.indexOptions == IndexOptions.DOCS_AND_FREQS && fieldOption == IndexReader.FieldOption.OMIT_POSITIONS) {
549 fieldSet.add(fi.name);
551 else if (fi.storePayloads && fieldOption == IndexReader.FieldOption.STORES_PAYLOADS) {
552 fieldSet.add(fi.name);
554 else if (fi.isIndexed && fieldOption == IndexReader.FieldOption.INDEXED) {
555 fieldSet.add(fi.name);
557 else if (fi.isIndexed && fi.storeTermVector == false && fieldOption == IndexReader.FieldOption.INDEXED_NO_TERMVECTOR) {
558 fieldSet.add(fi.name);
// TERMVECTOR = vectors stored without positions or offsets.
560 else if (fi.storeTermVector == true &&
561 fi.storePositionWithTermVector == false &&
562 fi.storeOffsetWithTermVector == false &&
563 fieldOption == IndexReader.FieldOption.TERMVECTOR) {
564 fieldSet.add(fi.name);
566 else if (fi.isIndexed && fi.storeTermVector && fieldOption == IndexReader.FieldOption.INDEXED_WITH_TERMVECTOR) {
567 fieldSet.add(fi.name);
// Positions-only, offsets-only, and positions+offsets vector variants:
569 else if (fi.storePositionWithTermVector && fi.storeOffsetWithTermVector == false && fieldOption == IndexReader.FieldOption.TERMVECTOR_WITH_POSITION) {
570 fieldSet.add(fi.name);
572 else if (fi.storeOffsetWithTermVector && fi.storePositionWithTermVector == false && fieldOption == IndexReader.FieldOption.TERMVECTOR_WITH_OFFSET) {
573 fieldSet.add(fi.name);
575 else if ((fi.storeOffsetWithTermVector && fi.storePositionWithTermVector) &&
576 fieldOption == IndexReader.FieldOption.TERMVECTOR_WITH_POSITION_OFFSET) {
577 fieldSet.add(fi.name);
// True if this segment stores norms for the given field.
584 public boolean hasNorms(String field) {
586 return norms.containsKey(field);
// Returns the norms byte array for a field, or (per the comment) a null/
// default path when the field has no norms — that path is not visible here.
590 public byte[] norms(String field) throws IOException {
592 final SegmentNorms norm = norms.get(field);
594 // not indexed, or norms not stored
// Updates one norm value via copy-on-write so shared readers are unaffected.
601 protected void doSetNorm(int doc, String field, byte value)
603 SegmentNorms norm = norms.get(field);
605 // field does not store norms
606 throw new IllegalStateException("Cannot setNorm for field " + field + ": norms were omitted");
610 norm.copyOnWrite()[doc] = value; // set the value
613 /** Read norms into a pre-allocated array. */
615 public synchronized void norms(String field, byte[] bytes, int offset)
// No norms for this field: fill with the default (encoded 1.0f) norm value.
619 SegmentNorms norm = norms.get(field);
621 Arrays.fill(bytes, offset, bytes.length, Similarity.getDefault().encodeNormValue(1.0f));
625 norm.bytes(bytes, offset, maxDoc());
629 /** @lucene.internal */
630 int getPostingsSkipInterval() {
631 return core.getTermsReader().getSkipInterval();
// Opens norms for every indexed, non-omitted field. Norms may live in one
// shared .nrm file (singleNormStream, ref-counted) or in separate per-field
// files; separate-norms and some branch lines are missing from this chunk.
634 private void openNorms(Directory cfsDir, int readBufferSize) throws IOException {
635 long nextNormSeek = SegmentNorms.NORMS_HEADER.length; //skip header (header unused for now)
636 int maxDoc = maxDoc();
637 for (int i = 0; i < core.fieldInfos.size(); i++) {
638 FieldInfo fi = core.fieldInfos.fieldInfo(i);
639 if (norms.containsKey(fi.name)) {
640 // in case this SegmentReader is being re-opened, we might be able to
641 // reuse some norm instances and skip loading them here
644 if (fi.isIndexed && !fi.omitNorms) {
645 Directory d = directory();
646 String fileName = si.getNormFileName(fi.number);
647 if (!si.hasSeparateNorms(fi.number)) {
651 // singleNormFile means multiple norms share this file
652 boolean singleNormFile = IndexFileNames.matchesExtension(fileName, IndexFileNames.NORMS_EXTENSION);
653 IndexInput normInput = null;
656 if (singleNormFile) {
657 normSeek = nextNormSeek;
// Lazily open the shared .nrm stream once; subsequent fields re-use it.
658 if (singleNormStream == null) {
659 singleNormStream = d.openInput(fileName, readBufferSize);
660 singleNormRef = new AtomicInteger(1);
662 singleNormRef.incrementAndGet();
664 // All norms in the .nrm file can share a single IndexInput since
665 // they are only used in a synchronized context.
666 // If this were to change in the future, a clone could be done here.
667 normInput = singleNormStream;
669 normInput = d.openInput(fileName);
670 // if the segment was created in 3.2 or after, we wrote the header for sure,
671 // and don't need to do the sketchy file size check. otherwise, we check
672 // if the size is exactly equal to maxDoc to detect a headerless file.
673 // NOTE: remove this check in Lucene 5.0!
674 String version = si.getVersion();
675 final boolean isUnversioned =
676 (version == null || StringHelper.getVersionComparator().compare(version, "3.2") < 0)
677 && normInput.length() == maxDoc();
681 normSeek = SegmentNorms.NORMS_HEADER.length;
685 norms.put(fi.name, new SegmentNorms(normInput, fi.number, normSeek, this));
686 nextNormSeek += maxDoc; // increment also if some norms are separate
// True once the terms index for this segment's core has been loaded.
691 boolean termsIndexLoaded() {
692 return core.termsIndexIsLoaded();
695 // NOTE: only called from IndexWriter when a near
696 // real-time reader is opened, or applyDeletes is run,
697 // sharing a segment that's still being merged. This
698 // method is not thread safe, and relies on the
699 // synchronization in IndexWriter
700 void loadTermsIndex(int termsIndexDivisor) throws IOException {
701 core.loadTermsIndex(si, termsIndexDivisor);
// Test helper: verifies all norm streams have been released; the early
// return for a still-open singleNormStream is not visible in this chunk.
705 boolean normsClosed() {
706 if (singleNormStream != null) {
709 for (final SegmentNorms norm : norms.values()) {
710 if (norm.refCount > 0) {
// Test helper: true when the named field's norms have been fully released.
718 boolean normsClosed(String field) {
719 return norms.get(field).refCount == 0;
723 * Create a clone from the initial TermVectorsReader and store it in the ThreadLocal.
724 * @return TermVectorsReader
// Lazily clones the shared TermVectorsReader per thread; the null-orig early
// return and catch handling are not fully visible in this sampled chunk.
726 TermVectorsReader getTermVectorsReader() {
727 TermVectorsReader tvReader = termVectorsLocal.get();
728 if (tvReader == null) {
729 TermVectorsReader orig = core.getTermVectorsReaderOrig();
734 tvReader = (TermVectorsReader) orig.clone();
735 } catch (CloneNotSupportedException cnse) {
739 termVectorsLocal.set(tvReader);
// The original (non-thread-local) reader, shared via the core.
744 TermVectorsReader getTermVectorsReaderOrig() {
745 return core.getTermVectorsReaderOrig();
748 /** Return a term frequency vector for the specified document and field. The
749 * vector returned contains term numbers and frequencies for all terms in
750 * the specified field of this document, if the field had storeTermVector
751 * flag set. If the flag was not set, the method returns null.
752 * @throws IOException
755 public TermFreqVector getTermFreqVector(int docNumber, String field) throws IOException {
756 // Check if this field is invalid or has no stored term vector
758 FieldInfo fi = core.fieldInfos.fieldInfo(field);
759 if (fi == null || !fi.storeTermVector)
762 TermVectorsReader termVectorsReader = getTermVectorsReader();
763 if (termVectorsReader == null)
766 return termVectorsReader.get(docNumber, field);
// Streams one field's term vector for a document into the supplied mapper;
// silently a no-op when the field has no stored vectors or no reader exists.
771 public void getTermFreqVector(int docNumber, String field, TermVectorMapper mapper) throws IOException {
773 FieldInfo fi = core.fieldInfos.fieldInfo(field);
774 if (fi == null || !fi.storeTermVector)
777 TermVectorsReader termVectorsReader = getTermVectorsReader();
778 if (termVectorsReader == null) {
783 termVectorsReader.get(docNumber, field, mapper);
// Streams all vectorized fields of a document into the supplied mapper.
788 public void getTermFreqVector(int docNumber, TermVectorMapper mapper) throws IOException {
791 TermVectorsReader termVectorsReader = getTermVectorsReader();
792 if (termVectorsReader == null)
795 termVectorsReader.get(docNumber, mapper);
798 /** Return an array of term frequency vectors for the specified document.
799 * The array contains a vector for each vectorized field in the document.
800 * Each vector contains term numbers and frequencies for all terms
801 * in a given vectorized field.
802 * If no such fields existed, the method returns null.
803 * @throws IOException
806 public TermFreqVector[] getTermFreqVectors(int docNumber) throws IOException {
809 TermVectorsReader termVectorsReader = getTermVectorsReader();
810 if (termVectorsReader == null)
813 return termVectorsReader.get(docNumber);
// Human-readable description delegating to SegmentInfo.toString, including
// the count of uncommitted pending deletes.
818 public String toString() {
819 final StringBuilder buffer = new StringBuilder();
823 buffer.append(si.toString(core.dir, pendingDeleteCount));
824 return buffer.toString();
828 * Return the name of the segment this reader is reading.
// NOTE(review): method bodies below are missing lines in this sampled chunk.
830 public String getSegmentName() {
835 * Return the SegmentInfo of the segment this reader is reading.
837 SegmentInfo getSegmentInfo() {
841 void setSegmentInfo(SegmentInfo info) {
// startCommit(): snapshots all mutable commit state so rollbackCommit() can
// restore it (the enclosing method declaration line is missing from view).
846 rollbackSegmentInfo = (SegmentInfo) si.clone();
847 rollbackHasChanges = hasChanges;
848 rollbackDeletedDocsDirty = deletedDocsDirty;
849 rollbackNormsDirty = normsDirty;
850 rollbackPendingDeleteCount = pendingDeleteCount;
851 for (SegmentNorms norm : norms.values()) {
852 norm.rollbackDirty = norm.dirty;
// Restores the state snapshotted by startCommit() after a failed commit.
856 void rollbackCommit() {
857 si.reset(rollbackSegmentInfo);
858 hasChanges = rollbackHasChanges;
859 deletedDocsDirty = rollbackDeletedDocsDirty;
860 normsDirty = rollbackNormsDirty;
861 pendingDeleteCount = rollbackPendingDeleteCount;
862 for (SegmentNorms norm : norms.values()) {
863 norm.dirty = norm.rollbackDirty;
867 /** Returns the directory this index resides in. */
869 public Directory directory() {
870 // Don't ensureOpen here -- in certain cases, when a
871 // cloned/reopened reader needs to commit, it may call
872 // this method on the closed original reader
876 // This is necessary so that cloned SegmentReaders (which
877 // share the underlying postings data) will map to the
878 // same entry in the FieldCache. See LUCENE-1579.
// Cache key identity = the shared postings stream, so clones share entries.
880 public final Object getCoreCacheKey() {
881 return core.freqStream;
// NOTE(review): getDeletesCacheKey body is missing from this sampled chunk.
885 public Object getDeletesCacheKey() {
890 public long getUniqueTermCount() {
891 return core.getTermsReader().size();
895 * Lotsa tests did hacks like:<br/>
896 * SegmentReader reader = (SegmentReader) IndexReader.open(dir);<br/>
897 * They broke. This method serves as a hack to keep hacks working
898 * We do it with R/W access for the tests (BW compatibility)
899 * @deprecated Remove this when tests are fixed!
// Opens the directory read/write and unwraps the single SegmentReader.
902 static SegmentReader getOnlySegmentReader(Directory dir) throws IOException {
903 return getOnlySegmentReader(IndexReader.open(dir,false));
// Unwraps a SegmentReader directly, or a DirectoryReader that holds exactly
// one segment; anything else is rejected.
906 static SegmentReader getOnlySegmentReader(IndexReader reader) {
907 if (reader instanceof SegmentReader)
908 return (SegmentReader) reader;
910 if (reader instanceof DirectoryReader) {
911 IndexReader[] subReaders = reader.getSequentialSubReaders();
912 if (subReaders.length != 1)
913 throw new IllegalArgumentException(reader + " has " + subReaders.length + " segments instead of exactly one");
915 return (SegmentReader) subReaders[0];
918 throw new IllegalArgumentException(reader + " is not a SegmentReader or a single-segment DirectoryReader");
// The divisor applied when the terms index was loaded for this core.
922 public int getTermInfosIndexDivisor() {
923 return core.termsIndexDivisor;
927 protected void readerFinished() {
928 // Do nothing here -- we have more careful control on
929 // when to notify that a SegmentReader has finished,
930 // because a given core is shared across many cloned
931 // SegmentReaders. We only notify once that core is no
932 // longer used (all SegmentReaders sharing it have been