1 package org.apache.lucene.index;
4 * Licensed to the Apache Software Foundation (ASF) under one or more
5 * contributor license agreements. See the NOTICE file distributed with
6 * this work for additional information regarding copyright ownership.
7 * The ASF licenses this file to You under the Apache License, Version 2.0
8 * (the "License"); you may not use this file except in compliance with
9 * the License. You may obtain a copy of the License at
11 * http://www.apache.org/licenses/LICENSE-2.0
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
20 import java.io.IOException;
21 import java.io.Reader;
23 import org.apache.lucene.analysis.Analyzer;
24 import org.apache.lucene.analysis.ReusableAnalyzerBase;
25 import org.apache.lucene.analysis.TokenStream;
26 import org.apache.lucene.analysis.Tokenizer;
27 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
28 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
29 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
30 import org.apache.lucene.document.Document;
31 import org.apache.lucene.document.Field;
32 import org.apache.lucene.store.Directory;
33 import org.apache.lucene.util.LuceneTestCase;
public class TestSameTokenSamePosition extends LuceneTestCase {

  /**
   * Attempt to reproduce an assertion error that happens
   * only with the trunk version around April 2011.
   */
  public void test() throws Exception {
    Directory dir = newDirectory();
    // BugReproAnalyzer produces a token stream containing duplicate terms at
    // the same position (position increment 0), which is the condition being
    // exercised by this reproduction test.
    RandomIndexWriter riw = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new BugReproAnalyzer()));
    Document doc = new Document();
    // The literal field text is irrelevant: BugReproAnalyzerTokenizer ignores
    // its input Reader and always emits a fixed, hard-coded token sequence.
    doc.add(new Field("eng", "Six drunken" /*This shouldn't matter. */,
        Field.Store.YES, Field.Index.ANALYZED));
    // NOTE(review): the remainder of this method (presumably addDocument and
    // resource cleanup) is elided in this view — confirm against the full file.

  /**
   * Same as the above, but with more docs.
   */
  public void testMoreDocs() throws Exception {
    Directory dir = newDirectory();
    // Same analyzer setup as test(); the difference is the document count.
    RandomIndexWriter riw = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new BugReproAnalyzer()));
    Document doc = new Document();
    // As above, the field text itself does not matter.
    doc.add(new Field("eng", "Six drunken" /*This shouldn't matter. */,
        Field.Store.YES, Field.Index.ANALYZED));
    // Index the same document repeatedly to exercise the bug across many docs.
    for (int i = 0; i < 100; i++) {
    // NOTE(review): loop body, method tail, and the class closing brace are
    // elided in this view — confirm against the full file.
/**
 * Analyzer that always returns a {@link BugReproAnalyzerTokenizer},
 * regardless of field name or input reader. Used only by
 * TestSameTokenSamePosition to feed a fixed, bug-triggering token stream
 * into the indexer.
 */
final class BugReproAnalyzer extends Analyzer{
  public TokenStream tokenStream(String arg0, Reader arg1) {
    // Both arguments are ignored; the tokenizer emits a hard-coded sequence.
    return new BugReproAnalyzerTokenizer();
    // NOTE(review): the method/class closing braces are elided in this view —
    // confirm against the full file.
/**
 * Tokenizer that ignores its input and emits a fixed stream of four tokens:
 * "six", "six", "drunken", "drunken". Each term is duplicated at the same
 * position (the duplicate carries position increment 0) with identical
 * start/end offsets — the exact condition the enclosing test reproduces.
 */
final class BugReproAnalyzerTokenizer extends Tokenizer {
  private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
  private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
  private final PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class);
  // Fixed token data: parallel arrays indexed by nextTokenIndex.
  private final int tokenCount = 4;
  private int nextTokenIndex = 0;
  private final String terms[] = new String[]{"six", "six", "drunken", "drunken"};
  private final int starts[] = new int[]{0, 0, 4, 4};   // start offsets per token
  private final int ends[] = new int[]{3, 3, 11, 11};   // end offsets per token
  private final int incs[] = new int[]{1, 0, 1, 0};     // 0 => same position as previous token

  public boolean incrementToken() throws IOException {
    // Emit the next hard-coded token, if any remain.
    if (nextTokenIndex < tokenCount) {
      termAtt.setEmpty().append(terms[nextTokenIndex]);
      offsetAtt.setOffset(starts[nextTokenIndex], ends[nextTokenIndex]);
      posIncAtt.setPositionIncrement(incs[nextTokenIndex]);
      // NOTE(review): the advance of nextTokenIndex, the return statements,
      // and the closing braces are elided in this view — confirm against the
      // full file.

  public void reset() throws IOException {
    // Rewind to the first token so the stream can be replayed on reuse.
    this.nextTokenIndex = 0;