1 package org.apache.lucene.search.highlight;
4 * Licensed to the Apache Software Foundation (ASF) under one or more
5 * contributor license agreements. See the NOTICE file distributed with
6 * this work for additional information regarding copyright ownership.
7 * The ASF licenses this file to You under the Apache License, Version 2.0
8 * (the "License"); you may not use this file except in compliance with
9 * the License. You may obtain a copy of the License at
11 * http://www.apache.org/licenses/LICENSE-2.0
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
20 import java.io.Reader;
21 import java.io.StringReader;
23 import org.apache.lucene.analysis.Analyzer;
24 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
25 import org.apache.lucene.analysis.MockTokenizer;
26 import org.apache.lucene.analysis.WhitespaceAnalyzer;
28 import org.apache.lucene.analysis.TokenStream;
29 import org.apache.lucene.util.Version;
31 public class OffsetLimitTokenFilterTest extends BaseTokenStreamTestCase {
  // Verifies that OffsetLimitTokenFilter truncates a token stream once the
  // configured character-offset limit has been consumed, for several limit
  // values, and that it passes short inputs through untouched.
  public void testFilter() throws Exception {
    // we disable MockTokenizer checks because we will forcefully limit the
    // tokenstream and call end() before incrementToken() returns false.
    MockTokenizer stream = new MockTokenizer(new StringReader(
        "short toolong evenmuchlongertext a ab toolong foo"),
        MockTokenizer.WHITESPACE, false);
    stream.setEnableChecks(false);
    // Limit 10: "short" and "toolong" are expected. Presumably the filter
    // emits a token while the accumulated offset is still below the limit,
    // so "toolong" squeaks through even though it ends past offset 10 —
    // NOTE(review): confirm against OffsetLimitTokenFilter's implementation.
    OffsetLimitTokenFilter filter = new OffsetLimitTokenFilter(stream, 10);
    assertTokenStreamContents(filter, new String[] {"short", "toolong"});
    // Same input with limit 12: still only the first two tokens survive.
    stream = new MockTokenizer(new StringReader(
        "short toolong evenmuchlongertext a ab toolong foo"),
        MockTokenizer.WHITESPACE, false);
    stream.setEnableChecks(false);
    filter = new OffsetLimitTokenFilter(stream, 12);
    assertTokenStreamContents(filter, new String[] {"short", "toolong"});
    // Limit 30 is large enough to also admit the third token,
    // "evenmuchlongertext", before the stream is cut off.
    stream = new MockTokenizer(new StringReader(
        "short toolong evenmuchlongertext a ab toolong foo"),
        MockTokenizer.WHITESPACE, false);
    stream.setEnableChecks(false);
    filter = new OffsetLimitTokenFilter(stream, 30);
    assertTokenStreamContents(filter, new String[] {"short", "toolong",
        "evenmuchlongertext"});
    // TODO: This is not actually testing reuse! (reusableTokenStream is not implemented)
    checkOneTermReuse(new Analyzer() {
      public TokenStream tokenStream(String fieldName, Reader reader) {
        MockTokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
        tokenizer.setEnableChecks(false);
        // Limit 10 exceeds the length of the single 7-char test term below,
        // so the term is expected to pass through unmodified.
        return new OffsetLimitTokenFilter(tokenizer, 10);
    }, "llenges", "llenges");