1 package org.apache.lucene.search;
4 * Licensed to the Apache Software Foundation (ASF) under one or more
5 * contributor license agreements. See the NOTICE file distributed with
6 * this work for additional information regarding copyright ownership.
7 * The ASF licenses this file to You under the Apache License, Version 2.0
8 * (the "License"); you may not use this file except in compliance with
9 * the License. You may obtain a copy of the License at
11 * http://www.apache.org/licenses/LICENSE-2.0
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
20 import java.io.IOException;
22 import org.apache.lucene.analysis.MockAnalyzer;
23 import org.apache.lucene.document.Document;
24 import org.apache.lucene.document.Field;
25 import org.apache.lucene.index.IndexReader;
26 import org.apache.lucene.index.RandomIndexWriter;
27 import org.apache.lucene.index.SerialMergeScheduler;
28 import org.apache.lucene.index.Term;
29 import org.apache.lucene.store.Directory;
30 import org.apache.lucene.util.FixedBitSet;
31 import org.apache.lucene.util.LuceneTestCase;
32 import org.apache.lucene.util._TestUtil;
34 public class TestCachingWrapperFilter extends LuceneTestCase {
  /**
   * Verifies the core caching contract: CachingWrapperFilter calls the
   * wrapped filter's getDocIdSet on the first request for a reader, then
   * serves later requests for the same reader from its cache without
   * delegating again (observed via MockFilter.wasCalled()).
   */
  public void testCachingWorks() throws Exception {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random, dir);
    IndexReader reader = IndexReader.open(dir, true);

    MockFilter filter = new MockFilter();
    CachingWrapperFilter cacher = new CachingWrapperFilter(filter);

    // first time, nested filter is called
    cacher.getDocIdSet(reader);
    assertTrue("first time", filter.wasCalled());

    // make sure no exception if cache is holding the wrong docIdSet
    cacher.getDocIdSet(reader);

    // second time, nested filter should not be called
    // NOTE(review): this assertion presumes the mock's called-flag was reset
    // between the calls above (reset line not visible here) — confirm
    cacher.getDocIdSet(reader);
    assertFalse("second time", filter.wasCalled());
  /**
   * A wrapped filter whose getDocIdSet produces a null set must be cached
   * and returned as the shared DocIdSet.EMPTY_DOCIDSET sentinel — asserted
   * with assertSame (identity), not just equality.
   */
  public void testNullDocIdSet() throws Exception {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random, dir);
    IndexReader reader = IndexReader.open(dir, true);

    // anonymous filter; its getDocIdSet body is not visible in this view —
    // presumably it returns null (TODO confirm)
    final Filter filter = new Filter() {
      public DocIdSet getDocIdSet(IndexReader reader) {
    CachingWrapperFilter cacher = new CachingWrapperFilter(filter);

    // the caching filter should return the empty set constant
    assertSame(DocIdSet.EMPTY_DOCIDSET, cacher.getDocIdSet(reader));
  /**
   * A wrapped filter that returns a DocIdSet whose iterator() is null must
   * likewise be cached as the shared DocIdSet.EMPTY_DOCIDSET sentinel.
   */
  public void testNullDocIdSetIterator() throws Exception {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random, dir);
    IndexReader reader = IndexReader.open(dir, true);

    // anonymous filter wrapping an anonymous DocIdSet; the iterator() body is
    // not visible in this view — presumably it returns null (TODO confirm)
    final Filter filter = new Filter() {
      public DocIdSet getDocIdSet(IndexReader reader) {
        return new DocIdSet() {
          public DocIdSetIterator iterator() {
    CachingWrapperFilter cacher = new CachingWrapperFilter(filter);

    // the caching filter should return the empty set constant
    assertSame(DocIdSet.EMPTY_DOCIDSET, cacher.getDocIdSet(reader));
  /**
   * Wraps {@code filter} in a CachingWrapperFilter and checks its caching
   * policy against the first sequential sub-reader: the cached DocIdSet is
   * always cacheable; the uncached set's isCacheable() must equal
   * {@code shouldCacheable}; a cacheable original is cached as the same
   * concrete class, while a non-cacheable one is copied into a FixedBitSet
   * (or replaced by the empty-set constant).
   *
   * @param reader top-level reader whose first sub-reader is queried
   * @param filter the filter under test
   * @param shouldCacheable expected cacheability of the uncached DocIdSet
   */
  private static void assertDocIdSetCacheable(IndexReader reader, Filter filter, boolean shouldCacheable) throws IOException {
    final CachingWrapperFilter cacher = new CachingWrapperFilter(filter);
    final DocIdSet originalSet = filter.getDocIdSet(reader.getSequentialSubReaders()[0]);
    final DocIdSet cachedSet = cacher.getDocIdSet(reader.getSequentialSubReaders()[0]);
    assertTrue(cachedSet.isCacheable());
    assertEquals(shouldCacheable, originalSet.isCacheable());
    //System.out.println("Original: "+originalSet.getClass().getName()+" -- cached: "+cachedSet.getClass().getName());
    if (originalSet.isCacheable()) {
      assertEquals("Cached DocIdSet must be of same class like uncached, if cacheable", originalSet.getClass(), cachedSet.getClass());
      // NOTE(review): the "} else {" separating the two assertions is not
      // visible in this view; the assertTrue below belongs to the else branch
      assertTrue("Cached DocIdSet must be an FixedBitSet if the original one was not cacheable (got " + cachedSet + ")", cachedSet instanceof FixedBitSet || cachedSet == DocIdSet.EMPTY_DOCIDSET);
  /**
   * Drives assertDocIdSetCacheable with a representative mix of filters:
   * a QueryWrapperFilter (not cacheable), a NumericRangeFilter over an empty
   * range (yields the default empty docidset — cacheable), a
   * FieldCacheRangeFilter (cacheable), and an anonymous filter returning a
   * FixedBitSet (always cacheable).
   */
  public void testIsCacheAble() throws Exception {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random, dir);
    writer.addDocument(new Document());
    IndexReader reader = IndexReader.open(dir, true);

    // not cacheable per the expected-value argument:
    assertDocIdSetCacheable(reader, new QueryWrapperFilter(new TermQuery(new Term("test","value"))), false);
    // returns default empty docidset, always cacheable:
    assertDocIdSetCacheable(reader, NumericRangeFilter.newIntRange("test", Integer.valueOf(10000), Integer.valueOf(-10000), true, true), true);
    assertDocIdSetCacheable(reader, FieldCacheRangeFilter.newIntRange("test", Integer.valueOf(10), Integer.valueOf(20), true, true), true);
    // a fixedbitset filter is always cacheable
    assertDocIdSetCacheable(reader, new Filter() {
      public DocIdSet getDocIdSet(IndexReader reader) {
        return new FixedBitSet(reader.maxDoc());
  /**
   * Exercises the three CachingWrapperFilter.DeletesMode policies against a
   * live NRT reader that is repeatedly refreshed across add/delete cycles:
   * IGNORE (cached bits keep showing deleted docs via ConstantScoreQuery),
   * RECACHE (a refresh after new deletions regenerates the cache — tracked
   * via missCount), and DYNAMIC (deletions applied at search time without a
   * cache miss). Uses a serial merge scheduler and a fixed log merge policy
   * so segment structure stays predictable for the assertions.
   */
  public void testEnforceDeletions() throws Exception {
    Directory dir = newDirectory();
    // NOTE(review): the constructor's (random, dir, ...) arguments and closing
    // paren are not visible in this view
    RandomIndexWriter writer = new RandomIndexWriter(
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
            setMergeScheduler(new SerialMergeScheduler()).
            // asserts below requires no unexpected merges:
            setMergePolicy(newLogMergePolicy(10))

    // NOTE: cannot use writer.getReader because RIW (on
    // flipping a coin) may give us a newly opened reader,
    // but we use .reopen on this reader below and expect to
    // (must) get an NRT reader:
    IndexReader reader = IndexReader.open(writer.w, true);
    // same reason we don't wrap?
    IndexSearcher searcher = newSearcher(reader, false);

    // add a doc, refresh the reader, and check that its there
    Document doc = new Document();
    doc.add(newField("id", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
    writer.addDocument(doc);
    reader = refreshReader(reader);
    searcher = newSearcher(reader, false);

    TopDocs docs = searcher.search(new MatchAllDocsQuery(), 1);
    assertEquals("Should find a hit...", 1, docs.totalHits);

    final Filter startFilter = new QueryWrapperFilter(new TermQuery(new Term("id", "1")));

    // --- DeletesMode.IGNORE: cached bits are reused even after deletions ---
    CachingWrapperFilter filter = new CachingWrapperFilter(startFilter, CachingWrapperFilter.DeletesMode.IGNORE);

    docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
    assertEquals("[query + filter] Should find a hit...", 1, docs.totalHits);
    ConstantScoreQuery constantScore = new ConstantScoreQuery(filter);
    docs = searcher.search(constantScore, 1);
    assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);

    // now delete the doc, refresh the reader, and see that it's not there
    _TestUtil.keepFullyDeletedSegments(writer.w);
    writer.deleteDocuments(new Term("id", "1"));
    reader = refreshReader(reader);
    searcher = newSearcher(reader, false);

    docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
    assertEquals("[query + filter] Should *not* find a hit...", 0, docs.totalHits);

    // with IGNORE the stale cached bits still match the deleted doc
    docs = searcher.search(constantScore, 1);
    assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);

    // --- DeletesMode.RECACHE ---
    // force cache to regenerate:
    filter = new CachingWrapperFilter(startFilter, CachingWrapperFilter.DeletesMode.RECACHE);

    writer.addDocument(doc);
    reader = refreshReader(reader);
    searcher = newSearcher(reader, false);

    docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
    assertEquals("[query + filter] Should find a hit...", 1, docs.totalHits);

    constantScore = new ConstantScoreQuery(filter);
    docs = searcher.search(constantScore, 1);
    assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);

    // NOTE: important to hold ref here so GC doesn't clear
    // the cache entry! Else the assert below may sometimes
    IndexReader oldReader = reader;

    // make sure we get a cache hit when we reopen reader
    // that had no change to deletions
    writer.deleteDocuments(new Term("foo", "bar"));
    reader = refreshReader(reader);
    assertTrue(reader == oldReader);
    int missCount = filter.missCount;
    docs = searcher.search(constantScore, 1);
    assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);
    // unchanged deletions => served from cache, no new miss
    assertEquals(missCount, filter.missCount);

    // now delete the doc, refresh the reader, and see that it's not there
    writer.deleteDocuments(new Term("id", "1"));
    reader = refreshReader(reader);
    searcher = newSearcher(reader, false);

    missCount = filter.missCount;
    docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
    // RECACHE regenerated the entry: exactly one additional miss
    assertEquals(missCount+1, filter.missCount);
    assertEquals("[query + filter] Should *not* find a hit...", 0, docs.totalHits);
    docs = searcher.search(constantScore, 1);
    assertEquals("[just filter] Should *not* find a hit...", 0, docs.totalHits);

    // --- DeletesMode.DYNAMIC ---
    // apply deletions dynamically
    filter = new CachingWrapperFilter(startFilter, CachingWrapperFilter.DeletesMode.DYNAMIC);

    writer.addDocument(doc);
    reader = refreshReader(reader);
    searcher = newSearcher(reader, false);

    docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
    assertEquals("[query + filter] Should find a hit...", 1, docs.totalHits);
    constantScore = new ConstantScoreQuery(filter);
    docs = searcher.search(constantScore, 1);
    assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);

    // now delete the doc, refresh the reader, and see that it's not there
    writer.deleteDocuments(new Term("id", "1"));
    reader = refreshReader(reader);
    searcher = newSearcher(reader, false);

    docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
    assertEquals("[query + filter] Should *not* find a hit...", 0, docs.totalHits);

    missCount = filter.missCount;
    docs = searcher.search(constantScore, 1);
    assertEquals("[just filter] Should *not* find a hit...", 0, docs.totalHits);

    // doesn't count as a miss
    assertEquals(missCount, filter.missCount);

    // NOTE: silliness to make sure JRE does not eliminate
    // our holding onto oldReader to prevent
    // CachingWrapperFilter's WeakHashMap from dropping the
    assertTrue(oldReader != null);
  /**
   * Refreshes {@code reader} via IndexReader.openIfChanged, which returns a
   * new reader only if the index changed (null otherwise).
   * NOTE(review): the method body continues past this view; presumably the
   * non-null branch closes the old reader and returns the new one, and the
   * else branch returns the original — confirm against the full file.
   */
  private static IndexReader refreshReader(IndexReader reader) throws IOException {
    IndexReader oldReader = reader;
    reader = IndexReader.openIfChanged(reader);
    if (reader != null) {