1 package org.apache.lucene.util;
/**
 * Copyright 2009 The Apache Software Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.FieldCache.CacheEntry;
/** 
 * Provides methods for sanity checking that entries in the FieldCache 
 * are not wasteful or inconsistent.
 * <p>
 * Lucene 2.9 introduced numerous enhancements into how the FieldCache 
 * is used by the low levels of Lucene searching (for Sorting and 
 * ValueSourceQueries) to improve both the speed for Sorting, as well 
 * as reopening of IndexReaders.  But these changes have shifted the 
 * usage of FieldCache from "top level" IndexReaders (frequently a 
 * MultiReader or DirectoryReader) down to the leaf level SegmentReaders.  
 * As a result, existing applications that directly access the FieldCache 
 * may find RAM usage increase significantly when upgrading to 2.9 or 
 * later.  This class provides an API for these applications (or their 
 * unit tests) to check at run time if the FieldCache contains "insane" 
 * usages of the FieldCache.
 * </p>
 * @lucene.experimental
 * @see FieldCacheSanityChecker.Insanity
 * @see FieldCacheSanityChecker.InsanityType
 */
52 public final class FieldCacheSanityChecker {
54 private RamUsageEstimator ramCalc = null;
55 public FieldCacheSanityChecker() {
59 * If set, will be used to estimate size for all CacheEntry objects
62 public void setRamUsageEstimator(RamUsageEstimator r) {
68 * Quick and dirty convenience method
71 public static Insanity[] checkSanity(FieldCache cache) {
72 return checkSanity(cache.getCacheEntries());
76 * Quick and dirty convenience method that instantiates an instance with
77 * "good defaults" and uses it to test the CacheEntrys
80 public static Insanity[] checkSanity(CacheEntry... cacheEntries) {
81 FieldCacheSanityChecker sanityChecker = new FieldCacheSanityChecker();
82 // doesn't check for interned
83 sanityChecker.setRamUsageEstimator(new RamUsageEstimator(false));
84 return sanityChecker.check(cacheEntries);
89 * Tests a CacheEntry[] for indication of "insane" cache usage.
91 * <B>NOTE:</b>FieldCache CreationPlaceholder objects are ignored.
92 * (:TODO: is this a bad idea? are we masking a real problem?)
95 public Insanity[] check(CacheEntry... cacheEntries) {
96 if (null == cacheEntries || 0 == cacheEntries.length)
97 return new Insanity[0];
99 if (null != ramCalc) {
100 for (int i = 0; i < cacheEntries.length; i++) {
101 cacheEntries[i].estimateSize(ramCalc);
105 // the indirect mapping lets MapOfSet dedup identical valIds for us
107 // maps the (valId) identityhashCode of cache values to
108 // sets of CacheEntry instances
109 final MapOfSets<Integer, CacheEntry> valIdToItems = new MapOfSets<Integer, CacheEntry>(new HashMap<Integer, Set<CacheEntry>>(17));
110 // maps ReaderField keys to Sets of ValueIds
111 final MapOfSets<ReaderField, Integer> readerFieldToValIds = new MapOfSets<ReaderField, Integer>(new HashMap<ReaderField, Set<Integer>>(17));
114 // any keys that we know result in more then one valId
115 final Set<ReaderField> valMismatchKeys = new HashSet<ReaderField>();
117 // iterate over all the cacheEntries to get the mappings we'll need
118 for (int i = 0; i < cacheEntries.length; i++) {
119 final CacheEntry item = cacheEntries[i];
120 final Object val = item.getValue();
122 // It's OK to have dup entries, where one is eg
123 // float[] and the other is the Bits (from
124 // getDocWithField())
125 if (val instanceof Bits) {
129 if (val instanceof FieldCache.CreationPlaceholder)
132 final ReaderField rf = new ReaderField(item.getReaderKey(),
133 item.getFieldName());
135 final Integer valId = Integer.valueOf(System.identityHashCode(val));
137 // indirect mapping, so the MapOfSet will dedup identical valIds for us
138 valIdToItems.put(valId, item);
139 if (1 < readerFieldToValIds.put(rf, valId)) {
140 valMismatchKeys.add(rf);
144 final List<Insanity> insanity = new ArrayList<Insanity>(valMismatchKeys.size() * 3);
146 insanity.addAll(checkValueMismatch(valIdToItems,
149 insanity.addAll(checkSubreaders(valIdToItems,
150 readerFieldToValIds));
152 return insanity.toArray(new Insanity[insanity.size()]);
156 * Internal helper method used by check that iterates over
157 * valMismatchKeys and generates a Collection of Insanity
158 * instances accordingly. The MapOfSets are used to populate
159 * the Insanity objects.
160 * @see InsanityType#VALUEMISMATCH
162 private Collection<Insanity> checkValueMismatch(MapOfSets<Integer, CacheEntry> valIdToItems,
163 MapOfSets<ReaderField, Integer> readerFieldToValIds,
164 Set<ReaderField> valMismatchKeys) {
166 final List<Insanity> insanity = new ArrayList<Insanity>(valMismatchKeys.size() * 3);
168 if (! valMismatchKeys.isEmpty() ) {
169 // we have multiple values for some ReaderFields
171 final Map<ReaderField, Set<Integer>> rfMap = readerFieldToValIds.getMap();
172 final Map<Integer, Set<CacheEntry>> valMap = valIdToItems.getMap();
173 for (final ReaderField rf : valMismatchKeys) {
174 final List<CacheEntry> badEntries = new ArrayList<CacheEntry>(valMismatchKeys.size() * 2);
175 for(final Integer value: rfMap.get(rf)) {
176 for (final CacheEntry cacheEntry : valMap.get(value)) {
177 badEntries.add(cacheEntry);
181 CacheEntry[] badness = new CacheEntry[badEntries.size()];
182 badness = badEntries.toArray(badness);
184 insanity.add(new Insanity(InsanityType.VALUEMISMATCH,
185 "Multiple distinct value objects for " +
186 rf.toString(), badness));
193 * Internal helper method used by check that iterates over
194 * the keys of readerFieldToValIds and generates a Collection
195 * of Insanity instances whenever two (or more) ReaderField instances are
196 * found that have an ancestry relationships.
198 * @see InsanityType#SUBREADER
200 private Collection<Insanity> checkSubreaders( MapOfSets<Integer, CacheEntry> valIdToItems,
201 MapOfSets<ReaderField, Integer> readerFieldToValIds) {
203 final List<Insanity> insanity = new ArrayList<Insanity>(23);
205 Map<ReaderField, Set<ReaderField>> badChildren = new HashMap<ReaderField, Set<ReaderField>>(17);
206 MapOfSets<ReaderField, ReaderField> badKids = new MapOfSets<ReaderField, ReaderField>(badChildren); // wrapper
208 Map<Integer, Set<CacheEntry>> viToItemSets = valIdToItems.getMap();
209 Map<ReaderField, Set<Integer>> rfToValIdSets = readerFieldToValIds.getMap();
211 Set<ReaderField> seen = new HashSet<ReaderField>(17);
213 Set<ReaderField> readerFields = rfToValIdSets.keySet();
214 for (final ReaderField rf : readerFields) {
216 if (seen.contains(rf)) continue;
218 List<Object> kids = getAllDescendantReaderKeys(rf.readerKey);
219 for (Object kidKey : kids) {
220 ReaderField kid = new ReaderField(kidKey, rf.fieldName);
222 if (badChildren.containsKey(kid)) {
223 // we've already process this kid as RF and found other problems
224 // track those problems as our own
225 badKids.put(rf, kid);
226 badKids.putAll(rf, badChildren.get(kid));
227 badChildren.remove(kid);
229 } else if (rfToValIdSets.containsKey(kid)) {
230 // we have cache entries for the kid
231 badKids.put(rf, kid);
238 // every mapping in badKids represents an Insanity
239 for (final ReaderField parent : badChildren.keySet()) {
240 Set<ReaderField> kids = badChildren.get(parent);
242 List<CacheEntry> badEntries = new ArrayList<CacheEntry>(kids.size() * 2);
244 // put parent entr(ies) in first
246 for (final Integer value : rfToValIdSets.get(parent)) {
247 badEntries.addAll(viToItemSets.get(value));
251 // now the entries for the descendants
252 for (final ReaderField kid : kids) {
253 for (final Integer value : rfToValIdSets.get(kid)) {
254 badEntries.addAll(viToItemSets.get(value));
258 CacheEntry[] badness = new CacheEntry[badEntries.size()];
259 badness = badEntries.toArray(badness);
261 insanity.add(new Insanity(InsanityType.SUBREADER,
262 "Found caches for descendants of " +
272 * Checks if the seed is an IndexReader, and if so will walk
273 * the hierarchy of subReaders building up a list of the objects
274 * returned by obj.getFieldCacheKey()
276 private List<Object> getAllDescendantReaderKeys(Object seed) {
277 List<Object> all = new ArrayList<Object>(17); // will grow as we iter
279 for (int i = 0; i < all.size(); i++) {
280 Object obj = all.get(i);
281 if (obj instanceof IndexReader) {
282 IndexReader[] subs = ((IndexReader)obj).getSequentialSubReaders();
283 for (int j = 0; (null != subs) && (j < subs.length); j++) {
284 all.add(subs[j].getCoreCacheKey());
289 // need to skip the first, because it was the seed
290 return all.subList(1, all.size());
294 * Simple pair object for using "readerKey + fieldName" a Map key
296 private final static class ReaderField {
297 public final Object readerKey;
298 public final String fieldName;
299 public ReaderField(Object readerKey, String fieldName) {
300 this.readerKey = readerKey;
301 this.fieldName = fieldName;
304 public int hashCode() {
305 return System.identityHashCode(readerKey) * fieldName.hashCode();
308 public boolean equals(Object that) {
309 if (! (that instanceof ReaderField)) return false;
311 ReaderField other = (ReaderField) that;
312 return (this.readerKey == other.readerKey &&
313 this.fieldName.equals(other.fieldName));
316 public String toString() {
317 return readerKey.toString() + "+" + fieldName;
322 * Simple container for a collection of related CacheEntry objects that
323 * in conjunction with each other represent some "insane" usage of the
326 public final static class Insanity {
327 private final InsanityType type;
328 private final String msg;
329 private final CacheEntry[] entries;
330 public Insanity(InsanityType type, String msg, CacheEntry... entries) {
332 throw new IllegalArgumentException
333 ("Insanity requires non-null InsanityType");
335 if (null == entries || 0 == entries.length) {
336 throw new IllegalArgumentException
337 ("Insanity requires non-null/non-empty CacheEntry[]");
341 this.entries = entries;
345 * Type of insane behavior this object represents
347 public InsanityType getType() { return type; }
349 * Description of hte insane behavior
351 public String getMsg() { return msg; }
353 * CacheEntry objects which suggest a problem
355 public CacheEntry[] getCacheEntries() { return entries; }
357 * Multi-Line representation of this Insanity object, starting with
358 * the Type and Msg, followed by each CacheEntry.toString() on it's
359 * own line prefaced by a tab character
362 public String toString() {
363 StringBuilder buf = new StringBuilder();
364 buf.append(getType()).append(": ");
367 if (null != m) buf.append(m);
371 CacheEntry[] ce = getCacheEntries();
372 for (int i = 0; i < ce.length; i++) {
373 buf.append('\t').append(ce[i].toString()).append('\n');
376 return buf.toString();
381 * An Enumeration of the different types of "insane" behavior that
382 * may be detected in a FieldCache.
384 * @see InsanityType#SUBREADER
385 * @see InsanityType#VALUEMISMATCH
386 * @see InsanityType#EXPECTED
388 public final static class InsanityType {
389 private final String label;
390 private InsanityType(final String label) {
394 public String toString() { return label; }
397 * Indicates an overlap in cache usage on a given field
398 * in sub/super readers.
400 public final static InsanityType SUBREADER
401 = new InsanityType("SUBREADER");
405 * Indicates entries have the same reader+fieldname but
406 * different cached values. This can happen if different datatypes,
407 * or parsers are used -- and while it's not necessarily a bug
408 * it's typically an indication of a possible problem.
411 * <bPNOTE:</b> Only the reader, fieldname, and cached value are actually
412 * tested -- if two cache entries have different parsers or datatypes but
413 * the cached values are the same Object (== not just equal()) this method
414 * does not consider that a red flag. This allows for subtle variations
415 * in the way a Parser is specified (null vs DEFAULT_LONG_PARSER, etc...)
418 public final static InsanityType VALUEMISMATCH
419 = new InsanityType("VALUEMISMATCH");
422 * Indicates an expected bit of "insanity". This may be useful for
423 * clients that wish to preserve/log information about insane usage
424 * but indicate that it was expected.
426 public final static InsanityType EXPECTED
427 = new InsanityType("EXPECTED");