1 package org.apache.lucene.search;
4 * Licensed to the Apache Software Foundation (ASF) under one or more
5 * contributor license agreements. See the NOTICE file distributed with
6 * this work for additional information regarding copyright ownership.
7 * The ASF licenses this file to You under the Apache License, Version 2.0
8 * (the "License"); you may not use this file except in compliance with
9 * the License. You may obtain a copy of the License at
11 * http://www.apache.org/licenses/LICENSE-2.0
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
20 import java.io.IOException;
22 import org.apache.lucene.document.Document;
23 import org.apache.lucene.document.Field;
24 import org.apache.lucene.analysis.MockAnalyzer;
25 import org.apache.lucene.index.IndexReader;
26 import org.apache.lucene.index.RandomIndexWriter;
27 import org.apache.lucene.index.SerialMergeScheduler;
28 import org.apache.lucene.index.Term;
29 import org.apache.lucene.store.Directory;
30 import org.apache.lucene.util.LuceneTestCase;
31 import org.apache.lucene.util.OpenBitSet;
32 import org.apache.lucene.util.OpenBitSetDISI;
33 import org.apache.lucene.util._TestUtil;
35 public class TestCachingWrapperFilter extends LuceneTestCase {
/**
 * Verifies that CachingWrapperFilter actually caches: the wrapped
 * MockFilter is consulted on the first getDocIdSet call against a
 * reader, but not on a later call against the same reader.
 */
public void testCachingWorks() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random, dir);
  IndexReader reader = IndexReader.open(dir, true);

  MockFilter filter = new MockFilter();
  CachingWrapperFilter cacher = new CachingWrapperFilter(filter);

  // first time, nested filter is called
  cacher.getDocIdSet(reader);
  assertTrue("first time", filter.wasCalled());

  // make sure no exception if cache is holding the wrong docIdSet
  cacher.getDocIdSet(reader);

  // second time, nested filter should not be called
  // NOTE(review): the MockFilter's "called" flag is presumably reset
  // before this point (line elided in this excerpt) — confirm against
  // MockFilter / the full source
  cacher.getDocIdSet(reader);
  assertFalse("second time", filter.wasCalled());
/**
 * A wrapped filter that produces no DocIdSet: the caching wrapper must
 * hand back the shared DocIdSet.EMPTY_DOCIDSET singleton (asserted via
 * assertSame) rather than caching/returning a null set.
 */
public void testNullDocIdSet() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random, dir);
  IndexReader reader = IndexReader.open(dir, true);

  // anonymous filter; method body elided in this excerpt — presumably
  // returns null (TODO confirm against the full source)
  final Filter filter = new Filter() {
    public DocIdSet getDocIdSet(IndexReader reader) {

  CachingWrapperFilter cacher = new CachingWrapperFilter(filter);

  // the caching filter should return the empty set constant
  assertSame(DocIdSet.EMPTY_DOCIDSET, cacher.getDocIdSet(reader));
/**
 * A wrapped filter whose DocIdSet yields no iterator: the caching
 * wrapper must still normalize this to the DocIdSet.EMPTY_DOCIDSET
 * singleton (asserted via assertSame).
 */
public void testNullDocIdSetIterator() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random, dir);
  IndexReader reader = IndexReader.open(dir, true);

  // anonymous filter returning a DocIdSet whose iterator() body is
  // elided in this excerpt — presumably returns null (TODO confirm)
  final Filter filter = new Filter() {
    public DocIdSet getDocIdSet(IndexReader reader) {
      return new DocIdSet() {
        public DocIdSetIterator iterator() {

  CachingWrapperFilter cacher = new CachingWrapperFilter(filter);

  // the caching filter should return the empty set constant
  assertSame(DocIdSet.EMPTY_DOCIDSET, cacher.getDocIdSet(reader));
/**
 * Asserts the caching contract for one filter: the cached DocIdSet is
 * always cacheable; if the filter's own set was cacheable the cached
 * set must be of the very same class, otherwise the cached copy must be
 * an OpenBitSetDISI (or the empty-set constant).
 *
 * @param reader top-level reader; only its first sequential sub-reader is queried
 * @param filter the filter under test
 * @param shouldCacheable expected isCacheable() of the filter's own DocIdSet
 * @throws IOException if the filter fails to produce its DocIdSet
 */
private static void assertDocIdSetCacheable(IndexReader reader, Filter filter, boolean shouldCacheable) throws IOException {
  final CachingWrapperFilter cacher = new CachingWrapperFilter(filter);
  final DocIdSet originalSet = filter.getDocIdSet(reader.getSequentialSubReaders()[0]);
  final DocIdSet cachedSet = cacher.getDocIdSet(reader.getSequentialSubReaders()[0]);
  // whatever the wrapped filter returned, the cached variant must be cacheable
  assertTrue(cachedSet.isCacheable());
  assertEquals(shouldCacheable, originalSet.isCacheable());
  //System.out.println("Original: "+originalSet.getClass().getName()+" -- cached: "+cachedSet.getClass().getName());
  if (originalSet.isCacheable()) {
    assertEquals("Cached DocIdSet must be of same class like uncached, if cacheable", originalSet.getClass(), cachedSet.getClass());
    // NOTE(review): the else-branch structure is elided in this excerpt;
    // the following assert presumably belongs to it
    assertTrue("Cached DocIdSet must be an OpenBitSet if the original one was not cacheable (got " + cachedSet + ")", cachedSet instanceof OpenBitSetDISI || cachedSet == DocIdSet.EMPTY_DOCIDSET);
/**
 * Drives assertDocIdSetCacheable across filters with different caching
 * behavior: a QueryWrapperFilter (not cacheable), a NumericRangeFilter
 * with an empty range (empty-set constant, cacheable), a
 * FieldCacheRangeFilter (cacheable), and a raw OpenBitSet filter
 * (always cacheable).
 */
public void testIsCacheAble() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random, dir);
  writer.addDocument(new Document());
  IndexReader reader = IndexReader.open(dir, true);

  // query-backed filter: expected not cacheable
  assertDocIdSetCacheable(reader, new QueryWrapperFilter(new TermQuery(new Term("test","value"))), false);
  // returns default empty docidset, always cacheable:
  // (lower bound 10000 > upper bound -10000, so the range is empty)
  assertDocIdSetCacheable(reader, NumericRangeFilter.newIntRange("test", Integer.valueOf(10000), Integer.valueOf(-10000), true, true), true);
  // field-cache-backed filter: expected cacheable
  assertDocIdSetCacheable(reader, FieldCacheRangeFilter.newIntRange("test", Integer.valueOf(10), Integer.valueOf(20), true, true), true);
  // a openbitset filter is always cacheable
  assertDocIdSetCacheable(reader, new Filter() {
    public DocIdSet getDocIdSet(IndexReader reader) {
      return new OpenBitSet();
/**
 * Exercises CachingWrapperFilter's three DeletesMode policies — IGNORE,
 * RECACHE and DYNAMIC — by adding/deleting a doc between reader reopens
 * and using the filter's missCount to prove when the cache entry was
 * reused versus regenerated.
 */
public void testEnforceDeletions() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
        setMergeScheduler(new SerialMergeScheduler()).
        // asserts below require no unexpected merges:
        setMergePolicy(newLogMergePolicy(10))

  // NOTE: cannot use writer.getReader because RIW (on
  // flipping a coin) may give us a newly opened reader,
  // but we use .reopen on this reader below and expect to
  // (must) get an NRT reader:
  IndexReader reader = IndexReader.open(writer.w, true);
  // same reason we don't wrap?
  IndexSearcher searcher = newSearcher(reader, false);

  // add a doc, refresh the reader, and check that its there
  Document doc = new Document();
  doc.add(newField("id", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
  writer.addDocument(doc);
  reader = refreshReader(reader);
  searcher = newSearcher(reader, false);

  TopDocs docs = searcher.search(new MatchAllDocsQuery(), 1);
  assertEquals("Should find a hit...", 1, docs.totalHits);

  final Filter startFilter = new QueryWrapperFilter(new TermQuery(new Term("id", "1")));

  // --- DeletesMode.IGNORE: cached entry is reused even after deletions change ---
  CachingWrapperFilter filter = new CachingWrapperFilter(startFilter, CachingWrapperFilter.DeletesMode.IGNORE);

  docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
  assertEquals("[query + filter] Should find a hit...", 1, docs.totalHits);
  ConstantScoreQuery constantScore = new ConstantScoreQuery(filter);
  docs = searcher.search(constantScore, 1);
  assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);

  // now delete the doc, refresh the reader, and see that it's not there
  _TestUtil.keepFullyDeletedSegments(writer.w);
  writer.deleteDocuments(new Term("id", "1"));
  reader = refreshReader(reader);
  searcher = newSearcher(reader, false);

  // the main query sees the deletion...
  docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
  assertEquals("[query + filter] Should *not* find a hit...", 0, docs.totalHits);
  // ...but the IGNORE-mode cached filter alone still matches the stale doc
  docs = searcher.search(constantScore, 1);
  assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);

  // --- DeletesMode.RECACHE ---
  // force cache to regenerate:
  filter = new CachingWrapperFilter(startFilter, CachingWrapperFilter.DeletesMode.RECACHE);

  writer.addDocument(doc);
  reader = refreshReader(reader);
  searcher = newSearcher(reader, false);

  docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
  assertEquals("[query + filter] Should find a hit...", 1, docs.totalHits);

  constantScore = new ConstantScoreQuery(filter);
  docs = searcher.search(constantScore, 1);
  assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);

  // NOTE: important to hold ref here so GC doesn't clear
  // the cache entry! Else the assert below may sometimes
  // fail (the filter caches per-reader in a WeakHashMap)
  IndexReader oldReader = reader;

  // make sure we get a cache hit when we reopen reader
  // that had no change to deletions
  reader = refreshReader(reader);
  assertTrue(reader != oldReader);
  searcher = newSearcher(reader, false);
  int missCount = filter.missCount;
  docs = searcher.search(constantScore, 1);
  assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);
  // unchanged deletions => missCount must not grow (cache hit)
  assertEquals(missCount, filter.missCount);

  // now delete the doc, refresh the reader, and see that it's not there
  writer.deleteDocuments(new Term("id", "1"));
  reader = refreshReader(reader);
  searcher = newSearcher(reader, false);

  missCount = filter.missCount;
  docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
  // RECACHE mode regenerates the entry once deletions changed: exactly one new miss
  assertEquals(missCount+1, filter.missCount);
  assertEquals("[query + filter] Should *not* find a hit...", 0, docs.totalHits);
  docs = searcher.search(constantScore, 1);
  assertEquals("[just filter] Should *not* find a hit...", 0, docs.totalHits);

  // --- DeletesMode.DYNAMIC ---
  // apply deletions dynamically
  filter = new CachingWrapperFilter(startFilter, CachingWrapperFilter.DeletesMode.DYNAMIC);

  writer.addDocument(doc);
  reader = refreshReader(reader);
  searcher = newSearcher(reader, false);

  docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
  assertEquals("[query + filter] Should find a hit...", 1, docs.totalHits);
  constantScore = new ConstantScoreQuery(filter);
  docs = searcher.search(constantScore, 1);
  assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);

  // now delete the doc, refresh the reader, and see that it's not there
  writer.deleteDocuments(new Term("id", "1"));
  reader = refreshReader(reader);
  searcher = newSearcher(reader, false);

  docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
  assertEquals("[query + filter] Should *not* find a hit...", 0, docs.totalHits);

  missCount = filter.missCount;
  docs = searcher.search(constantScore, 1);
  assertEquals("[just filter] Should *not* find a hit...", 0, docs.totalHits);

  // deletions were applied at search time, so this
  // doesn't count as a miss
  assertEquals(missCount, filter.missCount);

  // NOTE: silliness to make sure JRE does not optimize
  // away our holding onto oldReader to prevent
  // CachingWrapperFilter's WeakHashMap from dropping the
  // cache entry:
  assertTrue(oldReader != null);
300 private static IndexReader refreshReader(IndexReader reader) throws IOException {
301 IndexReader oldReader = reader;
302 reader = reader.reopen();
303 if (reader != oldReader) {