package org.apache.lucene.analysis.standard.std31;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.standard.StandardTokenizerInterface;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

/**
 * This class implements StandardTokenizer, except with a bug
 * (https://issues.apache.org/jira/browse/LUCENE-3358) where Han and Hiragana
 * characters would be split from combining characters:
 * @deprecated This class is only for exact backwards compatibility
 */
@Deprecated
%%

%unicode 6.0
%integer
%final
%public
%class StandardTokenizerImpl31
%implements StandardTokenizerInterface
%function getNextToken
%char

%include src/java/org/apache/lucene/analysis/standard/std31/SUPPLEMENTARY.jflex-macro
ALetter        = ([\p{WB:ALetter}]           | {ALetterSupp})
Format         = ([\p{WB:Format}]            | {FormatSupp})
Numeric        = ([\p{WB:Numeric}]           | {NumericSupp})
Extend         = ([\p{WB:Extend}]            | {ExtendSupp})
Katakana       = ([\p{WB:Katakana}]          | {KatakanaSupp})
MidLetter      = ([\p{WB:MidLetter}]         | {MidLetterSupp})
MidNum         = ([\p{WB:MidNum}]            | {MidNumSupp})
MidNumLet      = ([\p{WB:MidNumLet}]         | {MidNumLetSupp})
ExtendNumLet   = ([\p{WB:ExtendNumLet}]      | {ExtendNumLetSupp})
ComplexContext = ([\p{LB:Complex_Context}]   | {ComplexContextSupp})
Han            = ([\p{Script:Han}]           | {HanSupp})
Hiragana       = ([\p{Script:Hiragana}]      | {HiraganaSupp})

// Script=Hangul & Aletter
HangulEx       = (!(!\p{Script:Hangul}|!\p{WB:ALetter})) ({Format} | {Extend})*

// UAX#29 WB4. X (Extend | Format)* --> X
//
ALetterEx      = {ALetter}                     ({Format} | {Extend})*
// TODO: Convert hard-coded full-width numeric range to property intersection (something like [\p{Full-Width}&&\p{Numeric}]) once JFlex supports it
NumericEx      = ({Numeric} | [\uFF10-\uFF19]) ({Format} | {Extend})*
KatakanaEx     = {Katakana}                    ({Format} | {Extend})*
MidLetterEx    = ({MidLetter} | {MidNumLet})   ({Format} | {Extend})*
MidNumericEx   = ({MidNum} | {MidNumLet})      ({Format} | {Extend})*
ExtendNumLetEx = {ExtendNumLet}                ({Format} | {Extend})*

%{
  /** Alphanumeric sequences */
  public static final int WORD_TYPE = StandardTokenizer.ALPHANUM;

  /** Numbers */
  public static final int NUMERIC_TYPE = StandardTokenizer.NUM;

  /**
   * Chars in class \p{Line_Break = Complex_Context} are from South East Asian
   * scripts (Thai, Lao, Myanmar, Khmer, etc.). Sequences of these are kept
   * together as a single token rather than broken up, because the logic
   * required to break them at word boundaries is too complex for UAX#29.
   *
 * See Unicode Line Breaking Algorithm: http://www.unicode.org/reports/tr14/#SA
 */
public static final int SOUTH_EAST_ASIAN_TYPE = StandardTokenizer.SOUTHEAST_ASIAN;
/** Ideographic token type (e.g. Han characters) */
public static final int IDEOGRAPHIC_TYPE = StandardTokenizer.IDEOGRAPHIC;
/** Hiragana token type */
public static final int HIRAGANA_TYPE = StandardTokenizer.HIRAGANA;
/** Katakana token type */
public static final int KATAKANA_TYPE = StandardTokenizer.KATAKANA;
/** Hangul token type */
public static final int HANGUL_TYPE = StandardTokenizer.HANGUL;
/**
 * Returns the character offset of the current token within the input
 * (the {@code yychar} counter enabled by the {@code %char} option).
 */
public final int yychar() {
  return yychar;
}
/**
 * Copies the text of the current token into the given
 * {@link CharTermAttribute}.
 *
 * @param t attribute to receive the token text
 */
public final void getText(CharTermAttribute t) {
  final int length = zzMarkedPos - zzStartRead;
  t.copyBuffer(zzBuffer, zzStartRead, length);
}
%}
%%
// UAX#29 WB1. sot ÷
// WB2. ÷ eot
//
<