1 package org.apache.lucene.analysis.ar;
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
import java.io.File;
import java.io.IOException;
import java.io.Reader;
import java.util.Hashtable;
import java.util.Set;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.KeywordMarkerFilter;
import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.StopwordAnalyzerBase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WordlistLoader;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.Version;
/**
 * {@link Analyzer} for Arabic.
 * <p>
 * This analyzer implements light-stemming as specified by:
 * <i>Light Stemming for Arabic Information Retrieval</i>
 * http://www.mtholyoke.edu/~lballest/Pubs/arab_stem05.pdf
 * <p>
 * The analysis package contains three primary components:
 * <ul>
 *   <li>{@link ArabicNormalizationFilter}: Arabic orthographic normalization.
 *   <li>{@link ArabicStemFilter}: Arabic light stemming
 *   <li>Arabic stop words file: a set of default Arabic stop words.
 * </ul>
 */
56 public final class ArabicAnalyzer extends StopwordAnalyzerBase {
59 * File containing default Arabic stopwords.
61 * Default stopword list is from http://members.unine.ch/jacques.savoy/clef/index.html
62 * The stopword list is BSD-Licensed.
64 public final static String DEFAULT_STOPWORD_FILE = "stopwords.txt";
67 * The comment character in the stopwords file. All lines prefixed with this will be ignored
68 * @deprecated use {@link WordlistLoader#getWordSet(Reader, String, Version)} directly
70 // TODO make this private
72 public static final String STOPWORDS_COMMENT = "#";
75 * Returns an unmodifiable instance of the default stop-words set.
76 * @return an unmodifiable instance of the default stop-words set.
78 public static Set<?> getDefaultStopSet(){
79 return DefaultSetHolder.DEFAULT_STOP_SET;
83 * Atomically loads the DEFAULT_STOP_SET in a lazy fashion once the outer class
84 * accesses the static final set the first time.;
86 private static class DefaultSetHolder {
87 static final Set<?> DEFAULT_STOP_SET;
91 DEFAULT_STOP_SET = loadStopwordSet(false, ArabicAnalyzer.class, DEFAULT_STOPWORD_FILE, STOPWORDS_COMMENT);
92 } catch (IOException ex) {
93 // default set should always be present as it is part of the
95 throw new RuntimeException("Unable to load default stopword set");
  // Terms in this set are marked as keywords and protected from stemming
  // (see createComponents); defaults to CharArraySet.EMPTY_SET.
  private final Set<?> stemExclusionSet;
103 * Builds an analyzer with the default stop words: {@link #DEFAULT_STOPWORD_FILE}.
105 public ArabicAnalyzer(Version matchVersion) {
106 this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET);
110 * Builds an analyzer with the given stop words
112 * @param matchVersion
113 * lucene compatibility version
117 public ArabicAnalyzer(Version matchVersion, Set<?> stopwords){
118 this(matchVersion, stopwords, CharArraySet.EMPTY_SET);
122 * Builds an analyzer with the given stop word. If a none-empty stem exclusion set is
123 * provided this analyzer will add a {@link KeywordMarkerFilter} before
124 * {@link ArabicStemFilter}.
126 * @param matchVersion
127 * lucene compatibility version
130 * @param stemExclusionSet
131 * a set of terms not to be stemmed
133 public ArabicAnalyzer(Version matchVersion, Set<?> stopwords, Set<?> stemExclusionSet){
134 super(matchVersion, stopwords);
135 this.stemExclusionSet = CharArraySet.unmodifiableSet(CharArraySet.copy(
136 matchVersion, stemExclusionSet));
140 * Builds an analyzer with the given stop words.
141 * @deprecated use {@link #ArabicAnalyzer(Version, Set)} instead
144 public ArabicAnalyzer( Version matchVersion, String... stopwords ) {
145 this(matchVersion, StopFilter.makeStopSet(matchVersion, stopwords ));
149 * Builds an analyzer with the given stop words.
150 * @deprecated use {@link #ArabicAnalyzer(Version, Set)} instead
153 public ArabicAnalyzer( Version matchVersion, Hashtable<?,?> stopwords ) {
154 this(matchVersion, stopwords.keySet());
158 * Builds an analyzer with the given stop words. Lines can be commented out using {@link #STOPWORDS_COMMENT}
159 * @deprecated use {@link #ArabicAnalyzer(Version, Set)} instead
162 public ArabicAnalyzer( Version matchVersion, File stopwords ) throws IOException {
163 this(matchVersion, WordlistLoader.getWordSet(IOUtils.getDecodingReader(stopwords,
164 IOUtils.CHARSET_UTF_8), STOPWORDS_COMMENT, matchVersion));
169 * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
170 * used to tokenize all the text in the provided {@link Reader}.
172 * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
173 * built from an {@link StandardTokenizer} filtered with
174 * {@link LowerCaseFilter}, {@link StopFilter},
175 * {@link ArabicNormalizationFilter}, {@link KeywordMarkerFilter}
176 * if a stem exclusion set is provided and {@link ArabicStemFilter}.
179 protected TokenStreamComponents createComponents(String fieldName,
181 final Tokenizer source = matchVersion.onOrAfter(Version.LUCENE_31) ?
182 new StandardTokenizer(matchVersion, reader) : new ArabicLetterTokenizer(matchVersion, reader);
183 TokenStream result = new LowerCaseFilter(matchVersion, source);
184 // the order here is important: the stopword list is not normalized!
185 result = new StopFilter( matchVersion, result, stopwords);
186 // TODO maybe we should make ArabicNormalization filter also KeywordAttribute aware?!
187 result = new ArabicNormalizationFilter(result);
188 if(!stemExclusionSet.isEmpty()) {
189 result = new KeywordMarkerFilter(result, stemExclusionSet);
191 return new TokenStreamComponents(source, new ArabicStemFilter(result));