1 package org.apache.lucene.search;
4 * Licensed to the Apache Software Foundation (ASF) under one or more
5 * contributor license agreements. See the NOTICE file distributed with
6 * this work for additional information regarding copyright ownership.
7 * The ASF licenses this file to You under the Apache License, Version 2.0
8 * (the "License"); you may not use this file except in compliance with
9 * the License. You may obtain a copy of the License at
11 * http://www.apache.org/licenses/LICENSE-2.0
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
20 import java.io.IOException;
22 import org.apache.lucene.document.Document;
23 import org.apache.lucene.document.Field;
24 import org.apache.lucene.analysis.MockAnalyzer;
25 import org.apache.lucene.index.IndexReader;
26 import org.apache.lucene.index.RandomIndexWriter;
27 import org.apache.lucene.index.SerialMergeScheduler;
28 import org.apache.lucene.index.Term;
29 import org.apache.lucene.store.Directory;
30 import org.apache.lucene.util.LuceneTestCase;
31 import org.apache.lucene.util.FixedBitSet;
32 import org.apache.lucene.util._TestUtil;
34 public class TestCachingWrapperFilter extends LuceneTestCase {
// Verifies the core caching contract: the wrapped filter is consulted on the
// first getDocIdSet call for a reader, and served from cache on later calls.
36 public void testCachingWorks() throws Exception {
37 Directory dir = newDirectory();
38 RandomIndexWriter writer = new RandomIndexWriter(random, dir);
// NOTE(review): the writer close/commit between these lines appears to be
// elided from this excerpt; a reader is opened over the (empty) index.
41 IndexReader reader = IndexReader.open(dir, true);
// MockFilter records whether its getDocIdSet was invoked (wasCalled()).
43 MockFilter filter = new MockFilter();
44 CachingWrapperFilter cacher = new CachingWrapperFilter(filter);
46 // first time, nested filter is called
47 cacher.getDocIdSet(reader);
48 assertTrue("first time", filter.wasCalled());
50 // make sure no exception if cache is holding the wrong docIdSet
51 cacher.getDocIdSet(reader);
53 // second time, nested filter should not be called
// NOTE(review): a filter.clear() reset of the wasCalled flag looks to be
// elided here (original line 54) — otherwise the assertFalse below could not
// pass after the assertTrue above. Confirm against the full source.
55 cacher.getDocIdSet(reader);
56 assertFalse("second time", filter.wasCalled());
// A wrapped filter that returns a null DocIdSet must be normalized by the
// cache to the shared DocIdSet.EMPTY_DOCIDSET singleton (hence assertSame).
62 public void testNullDocIdSet() throws Exception {
63 Directory dir = newDirectory();
64 RandomIndexWriter writer = new RandomIndexWriter(random, dir);
67 IndexReader reader = IndexReader.open(dir, true);
69 final Filter filter = new Filter() {
71 public DocIdSet getDocIdSet(IndexReader reader) {
// NOTE(review): the anonymous class body ("return null;") and its closing
// braces are elided from this excerpt (original lines ~72-73).
75 CachingWrapperFilter cacher = new CachingWrapperFilter(filter);
77 // the caching filter should return the empty set constant
78 assertSame(DocIdSet.EMPTY_DOCIDSET, cacher.getDocIdSet(reader));
// A non-null DocIdSet whose iterator() is null must likewise be collapsed by
// the cache to the shared DocIdSet.EMPTY_DOCIDSET singleton.
84 public void testNullDocIdSetIterator() throws Exception {
85 Directory dir = newDirectory();
86 RandomIndexWriter writer = new RandomIndexWriter(random, dir);
89 IndexReader reader = IndexReader.open(dir, true);
91 final Filter filter = new Filter() {
93 public DocIdSet getDocIdSet(IndexReader reader) {
94 return new DocIdSet() {
96 public DocIdSetIterator iterator() {
// NOTE(review): the iterator body ("return null;") and the closing braces of
// both anonymous classes are elided from this excerpt (original lines ~97-100).
102 CachingWrapperFilter cacher = new CachingWrapperFilter(filter);
104 // the caching filter should return the empty set constant
105 assertSame(DocIdSet.EMPTY_DOCIDSET, cacher.getDocIdSet(reader));
// Helper: wraps `filter` in a CachingWrapperFilter and checks that
// (a) the cached DocIdSet always reports isCacheable(),
// (b) the original set's cacheability matches `shouldCacheable`, and
// (c) a cacheable original is cached as-is (same class), while a
//     non-cacheable one is copied into a FixedBitSet (or the EMPTY singleton).
// Operates on the first sequential sub-reader (per-segment caching).
111 private static void assertDocIdSetCacheable(IndexReader reader, Filter filter, boolean shouldCacheable) throws IOException {
112 final CachingWrapperFilter cacher = new CachingWrapperFilter(filter);
113 final DocIdSet originalSet = filter.getDocIdSet(reader.getSequentialSubReaders()[0]);
114 final DocIdSet cachedSet = cacher.getDocIdSet(reader.getSequentialSubReaders()[0]);
115 assertTrue(cachedSet.isCacheable());
116 assertEquals(shouldCacheable, originalSet.isCacheable());
117 //System.out.println("Original: "+originalSet.getClass().getName()+" -- cached: "+cachedSet.getClass().getName());
118 if (originalSet.isCacheable()) {
119 assertEquals("Cached DocIdSet must be of same class like uncached, if cacheable", originalSet.getClass(), cachedSet.getClass());
// NOTE(review): the "} else {" separating the two assertions (original line
// ~120) is elided from this excerpt; the line below is the else branch.
121 assertTrue("Cached DocIdSet must be an FixedBitSet if the original one was not cacheable (got " + cachedSet + ")", cachedSet instanceof FixedBitSet || cachedSet == DocIdSet.EMPTY_DOCIDSET);
// Drives assertDocIdSetCacheable over representative filter types: a
// QueryWrapperFilter (not cacheable), a NumericRangeFilter with an inverted
// range (empty set — always cacheable), a FieldCacheRangeFilter (cacheable),
// and a hand-rolled filter returning a FixedBitSet (cacheable).
125 public void testIsCacheAble() throws Exception {
126 Directory dir = newDirectory();
127 RandomIndexWriter writer = new RandomIndexWriter(random, dir);
// One document so the single-segment reader below has content.
128 writer.addDocument(new Document());
131 IndexReader reader = IndexReader.open(dir, true);
// not cacheable:
134 assertDocIdSetCacheable(reader, new QueryWrapperFilter(new TermQuery(new Term("test","value"))), false);
135 // returns default empty docidset, always cacheable:
// Note the deliberately inverted bounds (10000..-10000) — matches nothing.
136 assertDocIdSetCacheable(reader, NumericRangeFilter.newIntRange("test", Integer.valueOf(10000), Integer.valueOf(-10000), true, true), true);
// FieldCacheRangeFilter is backed by the field cache, hence cacheable:
138 assertDocIdSetCacheable(reader, FieldCacheRangeFilter.newIntRange("test", Integer.valueOf(10), Integer.valueOf(20), true, true), true);
139 // a fixedbitset filter is always cacheable
140 assertDocIdSetCacheable(reader, new Filter() {
142 public DocIdSet getDocIdSet(IndexReader reader) {
143 return new FixedBitSet(reader.maxDoc());
// NOTE(review): the anonymous class's closing braces, the trailing ", true)"
// argument, and reader/dir teardown (original lines ~144-148) are elided here.
// Exercises the three CachingWrapperFilter.DeletesMode policies against an
// NRT reader that is reopened across add/delete operations:
//   IGNORE  — cached set is reused verbatim, so a deleted doc still matches
//             through the raw filter (ConstantScoreQuery path);
//   RECACHE — cache is rebuilt per reader, so deletes are reflected and
//             missCount grows on re-query;
//   DYNAMIC — deletes are applied on the fly without a cache miss.
// Relies on refreshReader() (defined below) and filter.missCount accounting.
151 public void testEnforceDeletions() throws Exception {
152 Directory dir = newDirectory();
153 RandomIndexWriter writer = new RandomIndexWriter(
// NOTE(review): the random/dir arguments of this call (original lines
// ~154-155) are elided from this excerpt; the config continues below.
156 newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
// Serial merging keeps merge timing deterministic for the asserts below.
157 setMergeScheduler(new SerialMergeScheduler()).
158 // asserts below requires no unexpected merges:
159 setMergePolicy(newLogMergePolicy(10))
162 // NOTE: cannot use writer.getReader because RIW (on
163 // flipping a coin) may give us a newly opened reader,
164 // but we use .reopen on this reader below and expect to
165 // (must) get an NRT reader:
166 IndexReader reader = IndexReader.open(writer.w, true);
167 // same reason we don't wrap?
168 IndexSearcher searcher = newSearcher(reader, false);
170 // add a doc, refresh the reader, and check that its there
171 Document doc = new Document();
172 doc.add(newField("id", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
173 writer.addDocument(doc);
175 reader = refreshReader(reader);
177 searcher = newSearcher(reader, false);
179 TopDocs docs = searcher.search(new MatchAllDocsQuery(), 1);
180 assertEquals("Should find a hit...", 1, docs.totalHits);
// The uncached filter all three modes wrap: matches the id:1 document.
182 final Filter startFilter = new QueryWrapperFilter(new TermQuery(new Term("id", "1")));
// --- Mode 1: IGNORE — deletions are NOT re-checked against the cached set.
185 CachingWrapperFilter filter = new CachingWrapperFilter(startFilter, CachingWrapperFilter.DeletesMode.IGNORE);
187 docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
188 assertEquals("[query + filter] Should find a hit...", 1, docs.totalHits);
189 ConstantScoreQuery constantScore = new ConstantScoreQuery(filter);
190 docs = searcher.search(constantScore, 1);
191 assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);
193 // now delete the doc, refresh the reader, and see that it's not there
// Keep fully-deleted segments so the reopened reader shares segment identity
// (otherwise the cache entry keyed on the old segment would be dropped).
194 _TestUtil.keepFullyDeletedSegments(writer.w);
195 writer.deleteDocuments(new Term("id", "1"));
197 reader = refreshReader(reader);
199 searcher = newSearcher(reader, false);
// The query+filter path applies the query's deleted-doc skipping, so no hit:
201 docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
202 assertEquals("[query + filter] Should *not* find a hit...", 0, docs.totalHits);
// But the raw cached set in IGNORE mode still contains the deleted doc:
204 docs = searcher.search(constantScore, 1);
205 assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);
208 // force cache to regenerate:
209 filter = new CachingWrapperFilter(startFilter, CachingWrapperFilter.DeletesMode.RECACHE);
211 writer.addDocument(doc);
213 reader = refreshReader(reader);
215 searcher = newSearcher(reader, false);
217 docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
219 assertEquals("[query + filter] Should find a hit...", 1, docs.totalHits);
221 constantScore = new ConstantScoreQuery(filter);
222 docs = searcher.search(constantScore, 1);
223 assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);
225 // NOTE: important to hold ref here so GC doesn't clear
226 // the cache entry! Else the assert below may sometimes
228 IndexReader oldReader = reader;
230 // make sure we get a cache hit when we reopen reader
231 // that had no change to deletions
// Deleting by a non-existent term changes nothing, so reopen is a no-op:
233 writer.deleteDocuments(new Term("foo", "bar"));
234 reader = refreshReader(reader);
235 assertTrue(reader == oldReader);
236 int missCount = filter.missCount;
237 docs = searcher.search(constantScore, 1);
238 assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);
// Same reader ⇒ cache hit ⇒ missCount unchanged:
239 assertEquals(missCount, filter.missCount);
241 // now delete the doc, refresh the reader, and see that it's not there
242 writer.deleteDocuments(new Term("id", "1"));
244 reader = refreshReader(reader);
246 searcher = newSearcher(reader, false);
248 missCount = filter.missCount;
249 docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
// RECACHE rebuilds for the changed deletions ⇒ exactly one new miss:
250 assertEquals(missCount+1, filter.missCount);
251 assertEquals("[query + filter] Should *not* find a hit...", 0, docs.totalHits);
252 docs = searcher.search(constantScore, 1);
253 assertEquals("[just filter] Should *not* find a hit...", 0, docs.totalHits);
256 // apply deletions dynamically
// --- Mode 3: DYNAMIC — deletions filtered at iteration time, no recache.
257 filter = new CachingWrapperFilter(startFilter, CachingWrapperFilter.DeletesMode.DYNAMIC);
259 writer.addDocument(doc);
260 reader = refreshReader(reader);
262 searcher = newSearcher(reader, false);
264 docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
265 assertEquals("[query + filter] Should find a hit...", 1, docs.totalHits);
266 constantScore = new ConstantScoreQuery(filter);
267 docs = searcher.search(constantScore, 1);
268 assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);
270 // now delete the doc, refresh the reader, and see that it's not there
271 writer.deleteDocuments(new Term("id", "1"));
273 reader = refreshReader(reader);
275 searcher = newSearcher(reader, false);
277 docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
278 assertEquals("[query + filter] Should *not* find a hit...", 0, docs.totalHits);
280 missCount = filter.missCount;
281 docs = searcher.search(constantScore, 1);
282 assertEquals("[just filter] Should *not* find a hit...", 0, docs.totalHits);
284 // doesn't count as a miss
285 assertEquals(missCount, filter.missCount);
287 // NOTE: silliness to make sure JRE does not optimize
288 // away our holding onto oldReader to prevent
289 // CachingWrapperFilter's WeakHashMap from dropping the
291 assertTrue(oldReader != null);
// NOTE(review): searcher/reader/writer/dir teardown (original lines ~292-298)
// is elided from this excerpt.
299 private static IndexReader refreshReader(IndexReader reader) throws IOException {
300 IndexReader oldReader = reader;
301 reader = reader.reopen();
302 if (reader != oldReader) {