package org.apache.lucene.analysis.cn;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;

import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.util.Version;
/** @deprecated Remove this test when ChineseAnalyzer is removed. */
@Deprecated
public class TestChineseTokenizer extends BaseTokenStreamTestCase
{
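    /*
     * ChineseTokenizer emits one token per character for the input used here,
     * so the start and end offsets should advance by exactly one character per token.
     */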
    public void testOtherLetterOffset() throws IOException
    {
        // sample text in which every character becomes its own single-character token
        String s = "a天b";
        ChineseTokenizer tokenizer = new ChineseTokenizer(new StringReader(s));

        int correctStartOffset = 0;
        int correctEndOffset = 1;
        OffsetAttribute offsetAtt = tokenizer.getAttribute(OffsetAttribute.class);
        while (tokenizer.incrementToken()) {
            assertEquals(correctStartOffset, offsetAtt.startOffset());
            assertEquals(correctEndOffset, offsetAtt.endOffset());
            correctStartOffset++;
            correctEndOffset++;
        }
    }
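    /*
     * The same ChineseAnalyzer instance is reused for a second text and must
     * still produce the expected terms, start offsets, and end offsets.
     */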
    public void testReusableTokenStream() throws Exception
    {
        Analyzer a = new ChineseAnalyzer();
        assertAnalyzesToReuse(a, "中华人民共和国",
            new String[] { "中", "华", "人", "民", "共", "和", "国" },
            new int[] { 0, 1, 2, 3, 4, 5, 6 },
            new int[] { 1, 2, 3, 4, 5, 6, 7 });
        assertAnalyzesToReuse(a, "北京市",
            new String[] { "北", "京", "市" },
            new int[] { 0, 1, 2 },
            new int[] { 1, 2, 3 });
    }
    /*
     * Analyzer that just uses ChineseTokenizer, not ChineseFilter.
     * Convenience to show the behavior of the tokenizer.
     */
    private class JustChineseTokenizerAnalyzer extends Analyzer {
        @Override
        public TokenStream tokenStream(String fieldName, Reader reader) {
            return new ChineseTokenizer(reader);
        }
    }
    /*
     * Analyzer that just uses ChineseFilter, not ChineseTokenizer.
     * Convenience to show the behavior of the filter.
     */
    private class JustChineseFilterAnalyzer extends Analyzer {
        @Override
        public TokenStream tokenStream(String fieldName, Reader reader) {
            return new ChineseFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader));
        }
    }
    /*
     * ChineseTokenizer tokenizes numbers as one token, but they are filtered by ChineseFilter.
     */
    public void testNumerics() throws Exception
    {
        Analyzer justTokenizer = new JustChineseTokenizerAnalyzer();
        assertAnalyzesTo(justTokenizer, "中1234", new String[] { "中", "1234" });

        // in this case the ChineseAnalyzer (which applies ChineseFilter) will remove the numeric token
        Analyzer a = new ChineseAnalyzer();
        assertAnalyzesTo(a, "中1234", new String[] { "中" });
    }
    /*
     * ChineseTokenizer tokenizes English similarly to SimpleAnalyzer:
     * it lowercases terms automatically.
     *
     * ChineseFilter has an English stopword list and also removes
     * single-character English tokens; the stopword list is case-sensitive.
     */
    public void testEnglish() throws Exception
    {
        Analyzer chinese = new ChineseAnalyzer();
        assertAnalyzesTo(chinese, "This is a Test. b c d",
            new String[] { "test" });

        Analyzer justTokenizer = new JustChineseTokenizerAnalyzer();
        assertAnalyzesTo(justTokenizer, "This is a Test. b c d",
            new String[] { "this", "is", "a", "test", "b", "c", "d" });

        Analyzer justFilter = new JustChineseFilterAnalyzer();
        assertAnalyzesTo(justFilter, "This is a Test. b c d",
            new String[] { "This", "Test." });
    }
}