X-Git-Url: https://git.mdrn.pl/pylucene.git/blobdiff_plain/a2e61f0c04805cfcb8706176758d1283c7e3a55c..aaeed5504b982cf3545252ab528713250aa33eed:/lucene-java-3.5.0/lucene/src/java/org/apache/lucene/analysis/StopAnalyzer.java

diff --git a/lucene-java-3.5.0/lucene/src/java/org/apache/lucene/analysis/StopAnalyzer.java b/lucene-java-3.5.0/lucene/src/java/org/apache/lucene/analysis/StopAnalyzer.java
new file mode 100644
index 0000000..9bd6e75
--- /dev/null
+++ b/lucene-java-3.5.0/lucene/src/java/org/apache/lucene/analysis/StopAnalyzer.java
@@ -0,0 +1,111 @@
+package org.apache.lucene.analysis;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.IOException;
+import java.io.Reader;
+import java.util.Arrays;
+import java.util.Set;
+import java.util.List;
+
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.Version;
+
+/** Filters {@link LetterTokenizer} with {@link LowerCaseFilter} and {@link StopFilter}.
+ *
+ *
+ * <a name="version"/>
+ * <p>You must specify the required {@link Version}
+ * compatibility when creating StopAnalyzer:
+ * <ul>
+ *   <li> As of 3.1, StopFilter correctly handles Unicode 4.0
+ *        supplementary characters in stopwords
+ *   <li> As of 2.9, position increments are preserved
+ * </ul>
+*/
+
+public final class StopAnalyzer extends StopwordAnalyzerBase {
+
+  /** An unmodifiable set containing some common English words that are not usually useful
+  for searching.*/
+  public static final Set<?> ENGLISH_STOP_WORDS_SET;
+
+  static {
+    final List<String> stopWords = Arrays.asList(
+      "a", "an", "and", "are", "as", "at", "be", "but", "by",
+      "for", "if", "in", "into", "is", "it",
+      "no", "not", "of", "on", "or", "such",
+      "that", "the", "their", "then", "there", "these",
+      "they", "this", "to", "was", "will", "with"
+    );
+    final CharArraySet stopSet = new CharArraySet(Version.LUCENE_CURRENT,
+        stopWords.size(), false);
+    stopSet.addAll(stopWords);
+    ENGLISH_STOP_WORDS_SET = CharArraySet.unmodifiableSet(stopSet);
+  }
+
+  /** Builds an analyzer which removes words in
+   *  {@link #ENGLISH_STOP_WORDS_SET}.
+   * @param matchVersion See <a href="#version">above</a>
+   */
+  public StopAnalyzer(Version matchVersion) {
+    this(matchVersion, ENGLISH_STOP_WORDS_SET);
+  }
+
+  /** Builds an analyzer with the stop words from the given set.
+   * @param matchVersion See above
+   * @param stopWords Set of stop words */
+  public StopAnalyzer(Version matchVersion, Set<?> stopWords) {
+    super(matchVersion, stopWords);
+  }
+
+  /** Builds an analyzer with the stop words from the given file.
+   * @see WordlistLoader#getWordSet(Reader, Version)
+   * @param matchVersion See above
+   * @param stopwordsFile File to load stop words from */
+  public StopAnalyzer(Version matchVersion, File stopwordsFile) throws IOException {
+    this(matchVersion, WordlistLoader.getWordSet(IOUtils.getDecodingReader(stopwordsFile,
+        IOUtils.CHARSET_UTF_8), matchVersion));
+  }
+
+  /** Builds an analyzer with the stop words from the given reader.
+   * @see WordlistLoader#getWordSet(Reader, Version)
+   * @param matchVersion See above
+   * @param stopwords Reader to load stop words from */
+  public StopAnalyzer(Version matchVersion, Reader stopwords) throws IOException {
+    this(matchVersion, WordlistLoader.getWordSet(stopwords, matchVersion));
+  }
+
+  /**
+   * Creates
+   * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+   * used to tokenize all the text in the provided {@link Reader}.
+   *
+   * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+   *         built from a {@link LowerCaseTokenizer} filtered with
+   *         {@link StopFilter}
+   */
+  @Override
+  protected TokenStreamComponents createComponents(String fieldName,
+      Reader reader) {
+    final Tokenizer source = new LowerCaseTokenizer(matchVersion, reader);
+    return new TokenStreamComponents(source, new StopFilter(matchVersion,
+          source, stopwords));
+  }
+}
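
Usage sketch (not part of the patch above): how the analyzer in this file is typically consumed, assuming the Lucene 3.5 API (Version.LUCENE_35, Analyzer.tokenStream, CharTermAttribute). The class name StopAnalyzerDemo, the field name "body", and the sample sentence are illustrative values only.

import java.io.IOException;
import java.io.StringReader;

import org.apache.lucene.analysis.StopAnalyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.util.Version;

public class StopAnalyzerDemo {
  public static void main(String[] args) throws IOException {
    // Built with the default ENGLISH_STOP_WORDS_SET defined in the class above.
    StopAnalyzer analyzer = new StopAnalyzer(Version.LUCENE_35);

    // "body" is an arbitrary field name; the text is sample input.
    TokenStream stream = analyzer.tokenStream("body",
        new StringReader("The quick brown fox is not a dog"));
    CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);

    stream.reset();
    while (stream.incrementToken()) {
      // Prints: quick, brown, fox, dog -- stop words removed, terms lower-cased.
      System.out.println(term.toString());
    }
    stream.end();
    stream.close();
  }
}

The File- and Reader-based constructors behave the same way; they only differ in loading the stop word set through WordlistLoader.getWordSet instead of taking it directly.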
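For reference, createComponents() above chains a LowerCaseTokenizer into a StopFilter. A rough sketch of assembling that same chain by hand, using only constructors that appear in this file (Version.LUCENE_35 and the helper class ManualStopPipeline are illustrative):

import java.io.StringReader;

import org.apache.lucene.analysis.LowerCaseTokenizer;
import org.apache.lucene.analysis.StopAnalyzer;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.util.Version;

public class ManualStopPipeline {
  public static TokenStream build(String text) {
    // Same chain the analyzer assembles: a letter tokenizer that lower-cases,
    // followed by a stop filter over the default English stop word set.
    Tokenizer source = new LowerCaseTokenizer(Version.LUCENE_35, new StringReader(text));
    return new StopFilter(Version.LUCENE_35, source, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
  }
}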