1 package org.apache.lucene.index;
4 * Licensed to the Apache Software Foundation (ASF) under one or more
5 * contributor license agreements. See the NOTICE file distributed with
6 * this work for additional information regarding copyright ownership.
7 * The ASF licenses this file to You under the Apache License, Version 2.0
8 * (the "License"); you may not use this file except in compliance with
9 * the License. You may obtain a copy of the License at
11 * http://www.apache.org/licenses/LICENSE-2.0
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
20 import java.io.IOException;
22 import org.apache.lucene.document.Document;
23 import org.apache.lucene.store.Directory;
24 import org.apache.lucene.store.RAMDirectory;
25 import org.apache.lucene.util.LuceneTestCase;
27 public class TestSizeBoundedOptimize extends LuceneTestCase {
// Adds numDocs empty documents to the given writer (one addDocument call per doc).
// Used below to size segments by document count.
// NOTE(review): the loop/method closing braces (original lines 33-36) are missing
// from this excerpt.
29 private void addDocs(IndexWriter writer, int numDocs) throws IOException {
30 for (int i = 0; i < numDocs; i++) {
31 Document doc = new Document();
32 writer.addDocument(doc);
// Builds an IndexWriterConfig for these tests: auto-flush by doc count is disabled
// (flushing is governed by the default RAM buffer size instead), and merging is
// disabled via NoMergePolicy so each test controls segment layout explicitly.
// NOTE(review): the `return conf;` and closing brace (original lines 43-45) are
// missing from this excerpt.
37 private static IndexWriterConfig newWriterConfig() throws IOException {
38 IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, null);
39 conf.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
40 conf.setRAMBufferSizeMB(IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB);
41 // prevent any merges by default.
42 conf.setMergePolicy(NoMergePolicy.COMPOUND_FILES);
// Verifies that LogByteSizeMergePolicy.setMaxMergeMBForOptimize keeps an oversized
// segment out of the optimize merge: 15 segments are created, segment 7 holding 30
// docs vs. 1 doc for the rest, and the size cap is set just above the smallest
// segment's byte size.
// NOTE(review): several original lines are missing from this excerpt (e.g. the
// writer.close() after indexing, sis.read(dir) before sis is inspected, and the
// optimize()/close() calls before the final assertion) — the comments below assume
// the standard flow of this Lucene test.
46 public void testByteSizeLimit() throws Exception {
47 // tests that the max merge size constraint is applied during optimize.
48 Directory dir = new RAMDirectory();
50 // Prepare an index w/ several small segments and a large one.
51 IndexWriterConfig conf = newWriterConfig();
52 IndexWriter writer = new IndexWriter(dir, conf);
53 final int numSegments = 15;
54 for (int i = 0; i < numSegments; i++) {
55 int numDocs = i == 7 ? 30 : 1;
56 addDocs(writer, numDocs);
60 SegmentInfos sis = new SegmentInfos();
// min = byte size (including docstores) of the first (smallest-style) segment;
// the cap below is (min + 1) bytes expressed in MB, so only 1-doc segments merge.
62 double min = sis.info(0).sizeInBytes(true);
64 conf = newWriterConfig();
65 LogByteSizeMergePolicy lmp = new LogByteSizeMergePolicy();
66 lmp.setMaxMergeMBForOptimize((min + 1) / (1 << 20));
67 conf.setMergePolicy(lmp);
69 writer = new IndexWriter(dir, conf);
73 // Should only be 3 segments in the index, because one of them exceeds the size limit
74 sis = new SegmentInfos();
76 assertEquals(3, sis.size());
// Verifies that LogMergePolicy.setMaxMergeDocs(3) is honored during optimize:
// with one large segment and several 1-doc segments, the large one must be left
// alone while the small ones merge, yielding 3 segments.
// NOTE(review): original lines 86-96 and 101-108 (the addDocs calls building the
// segments, writer.close(), optimize(), and sis.read(dir)) are missing from this
// excerpt.
79 public void testNumDocsLimit() throws Exception {
80 // tests that the max merge docs constraint is applied during optimize.
81 Directory dir = new RAMDirectory();
83 // Prepare an index w/ several small segments and a large one.
84 IndexWriterConfig conf = newWriterConfig();
85 IndexWriter writer = new IndexWriter(dir, conf);
97 conf = newWriterConfig();
98 LogMergePolicy lmp = new LogDocMergePolicy();
99 lmp.setMaxMergeDocs(3);
100 conf.setMergePolicy(lmp);
102 writer = new IndexWriter(dir, conf);
106 // Should only be 3 segments in the index, because one of them exceeds the size limit
107 SegmentInfos sis = new SegmentInfos();
109 assertEquals(3, sis.size());
// Verifies optimize behavior when the LAST segment exceeds maxMergeDocs: the
// preceding small segments merge into one, the oversized tail is kept, so the
// index ends up with 2 segments.
// NOTE(review): the indexing lines, close(), optimize() and sis.read(dir)
// (original lines 117-124, 129-135) are missing from this excerpt.
112 public void testLastSegmentTooLarge() throws Exception {
113 Directory dir = new RAMDirectory();
115 IndexWriterConfig conf = newWriterConfig();
116 IndexWriter writer = new IndexWriter(dir, conf);
125 conf = newWriterConfig();
126 LogMergePolicy lmp = new LogDocMergePolicy();
127 lmp.setMaxMergeDocs(3);
128 conf.setMergePolicy(lmp);
130 writer = new IndexWriter(dir, conf);
134 SegmentInfos sis = new SegmentInfos();
136 assertEquals(2, sis.size());
// Mirror of testLastSegmentTooLarge: here the FIRST segment exceeds maxMergeDocs.
// It is skipped by the size-bounded optimize and the trailing small segments merge,
// leaving 2 segments.
// NOTE(review): the indexing lines, close(), optimize() and sis.read(dir)
// (original lines 144-151, 156-162) are missing from this excerpt.
139 public void testFirstSegmentTooLarge() throws Exception {
140 Directory dir = new RAMDirectory();
142 IndexWriterConfig conf = newWriterConfig();
143 IndexWriter writer = new IndexWriter(dir, conf);
152 conf = newWriterConfig();
153 LogMergePolicy lmp = new LogDocMergePolicy();
154 lmp.setMaxMergeDocs(3);
155 conf.setMergePolicy(lmp);
157 writer = new IndexWriter(dir, conf);
161 SegmentInfos sis = new SegmentInfos();
163 assertEquals(2, sis.size());
// When every segment fits under maxMergeDocs, a size-bounded optimize behaves like
// a normal optimize and collapses the index to a single segment.
// NOTE(review): the indexing lines, close(), optimize() and sis.read(dir)
// (original lines 171-178, 183-189) are missing from this excerpt.
166 public void testAllSegmentsSmall() throws Exception {
167 Directory dir = new RAMDirectory();
169 IndexWriterConfig conf = newWriterConfig();
170 IndexWriter writer = new IndexWriter(dir, conf);
179 conf = newWriterConfig();
180 LogMergePolicy lmp = new LogDocMergePolicy();
181 lmp.setMaxMergeDocs(3);
182 conf.setMergePolicy(lmp);
184 writer = new IndexWriter(dir, conf);
188 SegmentInfos sis = new SegmentInfos();
190 assertEquals(1, sis.size());
// When every segment already exceeds maxMergeDocs (cap = 2 here), optimize must
// merge nothing — the segment count (3) is unchanged.
// NOTE(review): the indexing lines, close(), optimize() and sis.read(dir)
// (original lines 198-204, 209-215) are missing from this excerpt.
193 public void testAllSegmentsLarge() throws Exception {
194 Directory dir = new RAMDirectory();
196 IndexWriterConfig conf = newWriterConfig();
197 IndexWriter writer = new IndexWriter(dir, conf);
205 conf = newWriterConfig();
206 LogMergePolicy lmp = new LogDocMergePolicy();
207 lmp.setMaxMergeDocs(2);
208 conf.setMergePolicy(lmp);
210 writer = new IndexWriter(dir, conf);
214 SegmentInfos sis = new SegmentInfos();
216 assertEquals(3, sis.size());
// Alternating large/small segments: the large ones are excluded from the optimize
// merge and a lone small segment has no eligible partner, so 4 segments remain.
// NOTE(review): the indexing lines, close(), optimize() and sis.read(dir)
// (original lines 224-231, 236-242) are missing from this excerpt.
219 public void testOneLargeOneSmall() throws Exception {
220 Directory dir = new RAMDirectory();
222 IndexWriterConfig conf = newWriterConfig();
223 IndexWriter writer = new IndexWriter(dir, conf);
232 conf = newWriterConfig();
233 LogMergePolicy lmp = new LogDocMergePolicy();
234 lmp.setMaxMergeDocs(3);
235 conf.setMergePolicy(lmp);
237 writer = new IndexWriter(dir, conf);
241 SegmentInfos sis = new SegmentInfos();
243 assertEquals(4, sis.size());
// Verifies the interaction of mergeFactor(2) with maxMergeDocs(3) during a
// size-bounded optimize: merges are limited to pairs, and pairs whose combined
// doc count would exceed the cap are skipped, leaving 4 segments.
// NOTE(review): the indexing lines, close(), optimize() and sis.read(dir)
// (original lines 251-261, 267-275) are missing from this excerpt.
246 public void testMergeFactor() throws Exception {
247 Directory dir = new RAMDirectory();
249 IndexWriterConfig conf = newWriterConfig();
250 IndexWriter writer = new IndexWriter(dir, conf);
262 conf = newWriterConfig();
263 LogMergePolicy lmp = new LogDocMergePolicy();
264 lmp.setMaxMergeDocs(3);
265 lmp.setMergeFactor(2);
266 conf.setMergePolicy(lmp);
268 writer = new IndexWriter(dir, conf);
272 // Should only be 4 segments in the index, because of the merge factor and
273 // max merge docs settings.
274 SegmentInfos sis = new SegmentInfos();
276 assertEquals(4, sis.size());
// A segment that is too large to merge but carries deletions must still be
// rewritten by optimize to expunge the deletes: after deleting one doc via an
// IndexReader and optimizing with maxMergeDocs(3), the index keeps 3 segments
// and the last one has no deletions.
// NOTE(review): the indexing lines, r.close()/writer.close(), optimize() and
// sis.read(dir) (original lines 284-290, 294-295, 300-307) are missing from this
// excerpt.
279 public void testSingleNonOptimizedSegment() throws Exception {
280 Directory dir = new RAMDirectory();
282 IndexWriterConfig conf = newWriterConfig();
283 IndexWriter writer = new IndexWriter(dir, conf);
291 // delete the last document, so that the last segment is optimized.
292 IndexReader r = IndexReader.open(dir, false);
293 r.deleteDocument(r.numDocs() - 1);
296 conf = newWriterConfig();
297 LogMergePolicy lmp = new LogDocMergePolicy();
298 lmp.setMaxMergeDocs(3);
299 conf.setMergePolicy(lmp);
301 writer = new IndexWriter(dir, conf);
305 // Verify that the last segment does not have deletions.
306 SegmentInfos sis = new SegmentInfos();
308 assertEquals(3, sis.size());
309 assertFalse(sis.info(2).hasDeletions());
// An index that is already a single segment with no deletions stays a single
// segment after a size-bounded optimize — nothing needs merging or rewriting.
// NOTE(review): the indexing lines, close(), optimize() and sis.read(dir)
// (original lines 317-321, 326-333) are missing from this excerpt.
312 public void testSingleOptimizedSegment() throws Exception {
313 Directory dir = new RAMDirectory();
315 IndexWriterConfig conf = newWriterConfig();
316 IndexWriter writer = new IndexWriter(dir, conf);
322 conf = newWriterConfig();
323 LogMergePolicy lmp = new LogDocMergePolicy();
324 lmp.setMaxMergeDocs(3);
325 conf.setMergePolicy(lmp);
327 writer = new IndexWriter(dir, conf);
331 // Verify that the last segment does not have deletions.
332 SegmentInfos sis = new SegmentInfos();
334 assertEquals(1, sis.size());
// A single segment that exceeds maxMergeDocs (cap = 2) is skipped entirely by
// the size-bounded optimize even though it carries a deletion: the index still
// has 1 segment and the deletion is NOT expunged (contrast with
// testSingleNonOptimizedSegment above, where the segment fit under the cap).
// NOTE(review): the indexing lines, r.close()/writer.close(), optimize() and
// sis.read(dir) (original lines 342-346, 350-351, 356-363) are missing from this
// excerpt.
337 public void testSingleNonOptimizedTooLargeSegment() throws Exception {
338 Directory dir = new RAMDirectory();
340 IndexWriterConfig conf = newWriterConfig();
341 IndexWriter writer = new IndexWriter(dir, conf);
347 // delete the last document
348 IndexReader r = IndexReader.open(dir, false);
349 r.deleteDocument(r.numDocs() - 1);
352 conf = newWriterConfig();
353 LogMergePolicy lmp = new LogDocMergePolicy();
354 lmp.setMaxMergeDocs(2);
355 conf.setMergePolicy(lmp);
357 writer = new IndexWriter(dir, conf);
361 // Verify that the last segment does not have deletions.
362 SegmentInfos sis = new SegmentInfos();
364 assertEquals(1, sis.size());
365 assertTrue(sis.info(0).hasDeletions());