package org.apache.lucene.analysis.nl;

/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
import java.io.File;
import java.io.IOException;
import java.io.Reader;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.KeywordMarkerFilter;
import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.ReusableAnalyzerBase;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WordlistLoader;
import org.apache.lucene.analysis.miscellaneous.StemmerOverrideFilter;
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.StandardAnalyzer; // for javadoc
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.util.Version;
/**
 * {@link Analyzer} for Dutch language.
 * <p>
 * Supports an external list of stopwords (words that
 * will not be indexed at all), an external list of exclusions (words that will
 * not be stemmed, but indexed) and an external list of word-stem pairs that overrule
 * the algorithm (dictionary stemming).
 * A default set of stopwords is used unless an alternative list is specified, but the
 * exclusion list is empty by default.
 * </p>
 *
 * <p>You must specify the required {@link Version}
 * compatibility when creating DutchAnalyzer:
 * <ul>
 *   <li> As of 3.1, Snowball stemming is done with SnowballFilter,
 *        LowerCaseFilter is used prior to StopFilter, and Snowball
 *        stopwords are used by default.
 *   <li> As of 2.9, StopFilter preserves position
 *        increments
 * </ul>
 *
 * <p><b>NOTE</b>: This class uses the same {@link Version}
 * dependent settings as {@link StandardAnalyzer}.</p>
 */
70 public final class DutchAnalyzer extends ReusableAnalyzerBase {
72 * List of typical Dutch stopwords.
73 * @deprecated use {@link #getDefaultStopSet()} instead
76 public final static String[] DUTCH_STOP_WORDS = getDefaultStopSet().toArray(new String[0]);
78 /** File containing default Dutch stopwords. */
79 public final static String DEFAULT_STOPWORD_FILE = "dutch_stop.txt";
82 * Returns an unmodifiable instance of the default stop-words set.
83 * @return an unmodifiable instance of the default stop-words set.
85 public static Set<?> getDefaultStopSet(){
86 return DefaultSetHolder.DEFAULT_STOP_SET;
89 private static class DefaultSetHolder {
90 static final Set<?> DEFAULT_STOP_SET;
94 DEFAULT_STOP_SET = WordlistLoader.getSnowballWordSet(SnowballFilter.class,
95 DEFAULT_STOPWORD_FILE);
96 } catch (IOException ex) {
97 // default set should always be present as it is part of the
99 throw new RuntimeException("Unable to load default stopword set");
106 * Contains the stopwords used with the StopFilter.
108 private final Set<?> stoptable;
111 * Contains words that should be indexed but not stemmed.
113 private Set<?> excltable = Collections.emptySet();
115 private Map<String, String> stemdict = new HashMap<String, String>();
116 private final Version matchVersion;
119 * Builds an analyzer with the default stop words ({@link #getDefaultStopSet()})
120 * and a few default entries for the stem exclusion table.
123 public DutchAnalyzer(Version matchVersion) {
124 this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET);
125 stemdict.put("fiets", "fiets"); //otherwise fiet
126 stemdict.put("bromfiets", "bromfiets"); //otherwise bromfiet
127 stemdict.put("ei", "eier");
128 stemdict.put("kind", "kinder");
131 public DutchAnalyzer(Version matchVersion, Set<?> stopwords){
132 this(matchVersion, stopwords, CharArraySet.EMPTY_SET);
135 public DutchAnalyzer(Version matchVersion, Set<?> stopwords, Set<?> stemExclusionTable){
136 stoptable = CharArraySet.unmodifiableSet(CharArraySet.copy(matchVersion, stopwords));
137 excltable = CharArraySet.unmodifiableSet(CharArraySet.copy(matchVersion, stemExclusionTable));
138 this.matchVersion = matchVersion;
142 * Builds an analyzer with the given stop words.
144 * @param matchVersion
146 * @deprecated use {@link #DutchAnalyzer(Version, Set)} instead
149 public DutchAnalyzer(Version matchVersion, String... stopwords) {
150 this(matchVersion, StopFilter.makeStopSet(matchVersion, stopwords));
154 * Builds an analyzer with the given stop words.
157 * @deprecated use {@link #DutchAnalyzer(Version, Set)} instead
160 public DutchAnalyzer(Version matchVersion, HashSet<?> stopwords) {
161 this(matchVersion, (Set<?>)stopwords);
165 * Builds an analyzer with the given stop words.
168 * @deprecated use {@link #DutchAnalyzer(Version, Set)} instead
171 public DutchAnalyzer(Version matchVersion, File stopwords) {
172 // this is completely broken!
174 stoptable = org.apache.lucene.analysis.WordlistLoader.getWordSet(stopwords);
175 } catch (IOException e) {
176 // TODO: throw IOException
177 throw new RuntimeException(e);
179 this.matchVersion = matchVersion;
183 * Builds an exclusionlist from an array of Strings.
185 * @param exclusionlist
186 * @deprecated use {@link #DutchAnalyzer(Version, Set, Set)} instead
189 public void setStemExclusionTable(String... exclusionlist) {
190 excltable = StopFilter.makeStopSet(matchVersion, exclusionlist);
191 setPreviousTokenStream(null); // force a new stemmer to be created
195 * Builds an exclusionlist from a Hashtable.
196 * @deprecated use {@link #DutchAnalyzer(Version, Set, Set)} instead
199 public void setStemExclusionTable(HashSet<?> exclusionlist) {
200 excltable = exclusionlist;
201 setPreviousTokenStream(null); // force a new stemmer to be created
205 * Builds an exclusionlist from the words contained in the given file.
206 * @deprecated use {@link #DutchAnalyzer(Version, Set, Set)} instead
209 public void setStemExclusionTable(File exclusionlist) {
211 excltable = org.apache.lucene.analysis.WordlistLoader.getWordSet(exclusionlist);
212 setPreviousTokenStream(null); // force a new stemmer to be created
213 } catch (IOException e) {
214 // TODO: throw IOException
215 throw new RuntimeException(e);
220 * Reads a stemdictionary file , that overrules the stemming algorithm
221 * This is a textfile that contains per line
222 * <tt>word<b>\t</b>stem</tt>, i.e: two tab seperated words
223 * @deprecated This prevents reuse of TokenStreams. If you wish to use a custom
224 * stem dictionary, create your own Analyzer with {@link StemmerOverrideFilter}
227 public void setStemDictionary(File stemdictFile) {
229 stemdict = WordlistLoader.getStemDict(stemdictFile);
230 setPreviousTokenStream(null); // force a new stemmer to be created
231 } catch (IOException e) {
232 // TODO: throw IOException
233 throw new RuntimeException(e);
239 * Returns a (possibly reused) {@link TokenStream} which tokenizes all the
240 * text in the provided {@link Reader}.
242 * @return A {@link TokenStream} built from a {@link StandardTokenizer}
243 * filtered with {@link StandardFilter}, {@link LowerCaseFilter},
244 * {@link StopFilter}, {@link KeywordMarkerFilter} if a stem exclusion set is provided,
245 * {@link StemmerOverrideFilter}, and {@link SnowballFilter}
248 protected TokenStreamComponents createComponents(String fieldName,
250 if (matchVersion.onOrAfter(Version.LUCENE_31)) {
251 final Tokenizer source = new StandardTokenizer(matchVersion, aReader);
252 TokenStream result = new StandardFilter(matchVersion, source);
253 result = new LowerCaseFilter(matchVersion, result);
254 result = new StopFilter(matchVersion, result, stoptable);
255 if (!excltable.isEmpty())
256 result = new KeywordMarkerFilter(result, excltable);
257 if (!stemdict.isEmpty())
258 result = new StemmerOverrideFilter(matchVersion, result, stemdict);
259 result = new SnowballFilter(result, new org.tartarus.snowball.ext.DutchStemmer());
260 return new TokenStreamComponents(source, result);
262 final Tokenizer source = new StandardTokenizer(matchVersion, aReader);
263 TokenStream result = new StandardFilter(matchVersion, source);
264 result = new StopFilter(matchVersion, result, stoptable);
265 if (!excltable.isEmpty())
266 result = new KeywordMarkerFilter(result, excltable);
267 result = new DutchStemFilter(result, stemdict);
268 return new TokenStreamComponents(source, result);