1 package org.apache.lucene.index;
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
import java.util.Iterator;
import java.util.Map;

import org.apache.lucene.index.BufferedDeletesStream.QueryAndLimit;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.RamUsageEstimator;
28 /** Holds buffered deletes by term or query, once pushed.
29 * Pushed deletes are write-once, so we shift to more
30 * memory efficient data structure to hold them. We don't
31 * hold docIDs because these are applied on flush. */
33 class FrozenBufferedDeletes {
35 /* Query we often undercount (say 24 bytes), plus int. */
36 final static int BYTES_PER_DEL_QUERY = RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_INT + 24;
38 // Terms, in sorted order:
39 final PrefixCodedTerms terms;
40 int termCount; // just for debugging
42 // Parallel array of deleted query, and the docIDUpto for
44 final Query[] queries;
45 final int[] queryLimits;
47 final int numTermDeletes;
50 public FrozenBufferedDeletes(BufferedDeletes deletes, long gen) {
51 Term termsArray[] = deletes.terms.keySet().toArray(new Term[deletes.terms.size()]);
52 termCount = termsArray.length;
53 ArrayUtil.mergeSort(termsArray);
54 PrefixCodedTerms.Builder builder = new PrefixCodedTerms.Builder();
55 for (Term term : termsArray) {
58 terms = builder.finish();
60 queries = new Query[deletes.queries.size()];
61 queryLimits = new int[deletes.queries.size()];
63 for(Map.Entry<Query,Integer> ent : deletes.queries.entrySet()) {
64 queries[upto] = ent.getKey();
65 queryLimits[upto] = ent.getValue();
69 bytesUsed = (int) terms.getSizeInBytes() + queries.length * BYTES_PER_DEL_QUERY;
70 numTermDeletes = deletes.numTermDeletes.get();
74 public Iterable<Term> termsIterable() {
75 return new Iterable<Term>() {
76 // @Override -- not until Java 1.6
77 public Iterator<Term> iterator() {
78 return terms.iterator();
83 public Iterable<QueryAndLimit> queriesIterable() {
84 return new Iterable<QueryAndLimit>() {
85 // @Override -- not until Java 1.6
86 public Iterator<QueryAndLimit> iterator() {
87 return new Iterator<QueryAndLimit>() {
90 // @Override -- not until Java 1.6
91 public boolean hasNext() {
92 return upto < queries.length;
95 // @Override -- not until Java 1.6
96 public QueryAndLimit next() {
97 QueryAndLimit ret = new QueryAndLimit(queries[upto], queryLimits[upto]);
102 // @Override -- not until Java 1.6
103 public void remove() {
104 throw new UnsupportedOperationException();
112 public String toString() {
114 if (numTermDeletes != 0) {
115 s += " " + numTermDeletes + " deleted terms (unique count=" + termCount + ")";
117 if (queries.length != 0) {
118 s += " " + queries.length + " deleted queries";
120 if (bytesUsed != 0) {
121 s += " bytesUsed=" + bytesUsed;
128 return termCount > 0 || queries.length > 0;