X-Git-Url: https://git.mdrn.pl/pylucene.git/blobdiff_plain/a2e61f0c04805cfcb8706176758d1283c7e3a55c..aaeed5504b982cf3545252ab528713250aa33eed:/lucene-java-3.4.0/lucene/src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java

diff --git a/lucene-java-3.4.0/lucene/src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java b/lucene-java-3.4.0/lucene/src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java
deleted file mode 100644
index 485ceef..0000000
--- a/lucene-java-3.4.0/lucene/src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java
+++ /dev/null
@@ -1,242 +0,0 @@
-package org.apache.lucene.analysis;
-
-import org.apache.lucene.analysis.standard.StandardAnalyzer;
-import org.apache.lucene.analysis.standard.StandardTokenizer;
-import org.apache.lucene.util.Version;
-
-import java.io.IOException;
-import java.io.Reader;
-import java.io.StringReader;
-import java.util.Arrays;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-public class TestStandardAnalyzer extends BaseTokenStreamTestCase {
-
-  public void testHugeDoc() throws IOException {
-    StringBuilder sb = new StringBuilder();
-    char whitespace[] = new char[4094];
-    Arrays.fill(whitespace, ' ');
-    sb.append(whitespace);
-    sb.append("testing 1234");
-    String input = sb.toString();
-    StandardTokenizer tokenizer = new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(input));
-    BaseTokenStreamTestCase.assertTokenStreamContents(tokenizer, new String[] { "testing", "1234" });
-  }
-
-  private Analyzer a = new ReusableAnalyzerBase() {
-    @Override
-    protected TokenStreamComponents createComponents
-        (String fieldName, Reader reader) {
-
-      Tokenizer tokenizer = new StandardTokenizer(TEST_VERSION_CURRENT, reader);
-      return new TokenStreamComponents(tokenizer);
-    }
-  };
-
-  public void testArmenian() throws Exception {
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "Վիքիպեդիայի 13 միլիոն հոդվածները (4,600` հայերեն վիքիպեդիայում) գրվել են կամավորների կողմից ու համարյա բոլոր հոդվածները կարող է խմբագրել ցանկաց մարդ ով կարող է բացել Վիքիպեդիայի կայքը։",
-        new String[] { "Վիքիպեդիայի", "13", "միլիոն", "հոդվածները", "4,600", "հայերեն", "վիքիպեդիայում", "գրվել", "են", "կամավորների", "կողմից",
-        "ու", "համարյա", "բոլոր", "հոդվածները", "կարող", "է", "խմբագրել", "ցանկաց", "մարդ", "ով", "կարող", "է", "բացել", "Վիքիպեդիայի", "կայքը" } );
-  }
-
-  public void testAmharic() throws Exception {
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "ዊኪፔድያ የባለ ብዙ ቋንቋ የተሟላ ትክክለኛና ነጻ መዝገበ ዕውቀት (ኢንሳይክሎፒዲያ) ነው። ማንኛውም",
-        new String[] { "ዊኪፔድያ", "የባለ", "ብዙ", "ቋንቋ", "የተሟላ", "ትክክለኛና", "ነጻ", "መዝገበ", "ዕውቀት", "ኢንሳይክሎፒዲያ", "ነው", "ማንኛውም" } );
-  }
-
-  public void testArabic() throws Exception {
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "الفيلم الوثائقي الأول عن ويكيبيديا يسمى \"الحقيقة بالأرقام: قصة ويكيبيديا\" (بالإنجليزية: Truth in Numbers: The Wikipedia Story)، سيتم إطلاقه في 2008.",
-        new String[] { "الفيلم", "الوثائقي", "الأول", "عن", "ويكيبيديا", "يسمى", "الحقيقة", "بالأرقام", "قصة", "ويكيبيديا",
-        "بالإنجليزية", "Truth", "in", "Numbers", "The", "Wikipedia", "Story", "سيتم", "إطلاقه", "في", "2008" } );
-  }
-
-  public void testAramaic() throws Exception {
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "ܘܝܩܝܦܕܝܐ (ܐܢܓܠܝܐ: Wikipedia) ܗܘ ܐܝܢܣܩܠܘܦܕܝܐ ܚܐܪܬܐ ܕܐܢܛܪܢܛ ܒܠܫܢ̈ܐ ܣܓܝܐ̈ܐ܂ ܫܡܗ ܐܬܐ ܡܢ ܡ̈ܠܬܐ ܕ\"ܘܝܩܝ\" ܘ\"ܐܝܢܣܩܠܘܦܕܝܐ\"܀",
-        new String[] { "ܘܝܩܝܦܕܝܐ", "ܐܢܓܠܝܐ", "Wikipedia", "ܗܘ", "ܐܝܢܣܩܠܘܦܕܝܐ", "ܚܐܪܬܐ", "ܕܐܢܛܪܢܛ", "ܒܠܫܢ̈ܐ", "ܣܓܝܐ̈ܐ", "ܫܡܗ",
-        "ܐܬܐ", "ܡܢ", "ܡ̈ܠܬܐ", "ܕ", "ܘܝܩܝ", "ܘ", "ܐܝܢܣܩܠܘܦܕܝܐ"});
-  }
-
-  public void testBengali() throws Exception {
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "এই বিশ্বকোষ পরিচালনা করে উইকিমিডিয়া ফাউন্ডেশন (একটি অলাভজনক সংস্থা)। উইকিপিডিয়ার শুরু ১৫ জানুয়ারি, ২০০১ সালে। এখন পর্যন্ত ২০০টিরও বেশী ভাষায় উইকিপিডিয়া রয়েছে।",
-        new String[] { "এই", "বিশ্বকোষ", "পরিচালনা", "করে", "উইকিমিডিয়া", "ফাউন্ডেশন", "একটি", "অলাভজনক", "সংস্থা", "উইকিপিডিয়ার",
-        "শুরু", "১৫", "জানুয়ারি", "২০০১", "সালে", "এখন", "পর্যন্ত", "২০০টিরও", "বেশী", "ভাষায়", "উইকিপিডিয়া", "রয়েছে" });
-  }
-
-  public void testFarsi() throws Exception {
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "ویکی پدیای انگلیسی در تاریخ ۲۵ دی ۱۳۷۹ به صورت مکملی برای دانشنامهٔ تخصصی نوپدیا نوشته شد.",
-        new String[] { "ویکی", "پدیای", "انگلیسی", "در", "تاریخ", "۲۵", "دی", "۱۳۷۹", "به", "صورت", "مکملی",
-        "برای", "دانشنامهٔ", "تخصصی", "نوپدیا", "نوشته", "شد" });
-  }
-
-  public void testGreek() throws Exception {
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "Γράφεται σε συνεργασία από εθελοντές με το λογισμικό wiki, κάτι που σημαίνει ότι άρθρα μπορεί να προστεθούν ή να αλλάξουν από τον καθένα.",
-        new String[] { "Γράφεται", "σε", "συνεργασία", "από", "εθελοντές", "με", "το", "λογισμικό", "wiki", "κάτι", "που",
-        "σημαίνει", "ότι", "άρθρα", "μπορεί", "να", "προστεθούν", "ή", "να", "αλλάξουν", "από", "τον", "καθένα" });
-  }
-
-  public void testThai() throws Exception {
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "การที่ได้ต้องแสดงว่างานดี. แล้วเธอจะไปไหน? ๑๒๓๔",
-        new String[] { "การที่ได้ต้องแสดงว่างานดี", "แล้วเธอจะไปไหน", "๑๒๓๔" });
-  }
-
-  public void testLao() throws Exception {
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "ສາທາລະນະລັດ ປະຊາທິປະໄຕ ປະຊາຊົນລາວ",
-        new String[] { "ສາທາລະນະລັດ", "ປະຊາທິປະໄຕ", "ປະຊາຊົນລາວ" });
-  }
-
-  public void testTibetan() throws Exception {
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "སྣོན་མཛོད་དང་ལས་འདིས་བོད་ཡིག་མི་ཉམས་གོང་འཕེལ་དུ་གཏོང་བར་ཧ་ཅང་དགེ་མཚན་མཆིས་སོ། །",
-        new String[] { "སྣོན", "མཛོད", "དང", "ལས", "འདིས", "བོད", "ཡིག",
-                       "མི", "ཉམས", "གོང", "འཕེལ", "དུ", "གཏོང", "བར",
-                       "ཧ", "ཅང", "དགེ", "མཚན", "མཆིས", "སོ" });
-  }
-
-  /*
-   * For Chinese, tokenize as char (these can later form bigrams or whatever)
-   */
-  public void testChinese() throws Exception {
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "我是中国人。 1234 Tests ",
-        new String[] { "我", "是", "中", "国", "人", "1234", "Tests"});
-  }
-
-  public void testEmpty() throws Exception {
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "", new String[] {});
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, ".", new String[] {});
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, " ", new String[] {});
-  }
-
-  /* test various jira issues this analyzer is related to */
-
-  public void testLUCENE1545() throws Exception {
-    /*
-     * StandardAnalyzer does not correctly tokenize the combining character U+0364 COMBINING LATIN SMALL LETTER E.
-     * The word "moͤchte" is incorrectly tokenized into "mo" and "chte"; the combining character is lost.
-     * The expected result is a single token, "moͤchte".
-     */
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "moͤchte", new String[] { "moͤchte" });
-  }
-
-  /* Tests from StandardAnalyzer, just to show behavior is similar */
-  public void testAlphanumericSA() throws Exception {
-    // alphanumeric tokens
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "B2B", new String[]{"B2B"});
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "2B", new String[]{"2B"});
-  }
-
-  public void testDelimitersSA() throws Exception {
-    // other delimiters: "-", "/", ","
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "some-dashed-phrase", new String[]{"some", "dashed", "phrase"});
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "dogs,chase,cats", new String[]{"dogs", "chase", "cats"});
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "ac/dc", new String[]{"ac", "dc"});
-  }
-
-  public void testApostrophesSA() throws Exception {
-    // internal apostrophes: O'Reilly, you're, O'Reilly's
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "O'Reilly", new String[]{"O'Reilly"});
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "you're", new String[]{"you're"});
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "she's", new String[]{"she's"});
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "Jim's", new String[]{"Jim's"});
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "don't", new String[]{"don't"});
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "O'Reilly's", new String[]{"O'Reilly's"});
-  }
-
-  public void testNumericSA() throws Exception {
-    // floating point, serial, model numbers, ip addresses, etc.
- BaseTokenStreamTestCase.assertAnalyzesTo(a, "21.35", new String[]{"21.35"}); - BaseTokenStreamTestCase.assertAnalyzesTo(a, "R2D2 C3PO", new String[]{"R2D2", "C3PO"}); - BaseTokenStreamTestCase.assertAnalyzesTo(a, "216.239.63.104", new String[]{"216.239.63.104"}); - BaseTokenStreamTestCase.assertAnalyzesTo(a, "216.239.63.104", new String[]{"216.239.63.104"}); - } - - public void testTextWithNumbersSA() throws Exception { - // numbers - BaseTokenStreamTestCase.assertAnalyzesTo(a, "David has 5000 bones", new String[]{"David", "has", "5000", "bones"}); - } - - public void testVariousTextSA() throws Exception { - // various - BaseTokenStreamTestCase.assertAnalyzesTo(a, "C embedded developers wanted", new String[]{"C", "embedded", "developers", "wanted"}); - BaseTokenStreamTestCase.assertAnalyzesTo(a, "foo bar FOO BAR", new String[]{"foo", "bar", "FOO", "BAR"}); - BaseTokenStreamTestCase.assertAnalyzesTo(a, "foo bar . FOO <> BAR", new String[]{"foo", "bar", "FOO", "BAR"}); - BaseTokenStreamTestCase.assertAnalyzesTo(a, "\"QUOTED\" word", new String[]{"QUOTED", "word"}); - } - - public void testKoreanSA() throws Exception { - // Korean words - BaseTokenStreamTestCase.assertAnalyzesTo(a, "안녕하세요 한글입니다", new String[]{"안녕하세요", "한글입니다"}); - } - - public void testOffsets() throws Exception { - BaseTokenStreamTestCase.assertAnalyzesTo(a, "David has 5000 bones", - new String[] {"David", "has", "5000", "bones"}, - new int[] {0, 6, 10, 15}, - new int[] {5, 9, 14, 20}); - } - - public void testTypes() throws Exception { - BaseTokenStreamTestCase.assertAnalyzesTo(a, "David has 5000 bones", - new String[] {"David", "has", "5000", "bones"}, - new String[] { "", "", "", "" }); - } - - public void testUnicodeWordBreaks() throws Exception { - WordBreakTestUnicode_6_0_0 wordBreakTest = new WordBreakTestUnicode_6_0_0(); - wordBreakTest.test(a); - } - - public void testSupplementary() throws Exception { - BaseTokenStreamTestCase.assertAnalyzesTo(a, "𩬅艱鍟䇹愯瀛", - new String[] {"𩬅", "艱", "鍟", "䇹", "愯", "瀛"}, - new String[] { "", "", "", "", "", "" }); - } - - public void testKorean() throws Exception { - BaseTokenStreamTestCase.assertAnalyzesTo(a, "훈민정음", - new String[] { "훈민정음" }, - new String[] { "" }); - } - - public void testJapanese() throws Exception { - BaseTokenStreamTestCase.assertAnalyzesTo(a, "仮名遣い カタカナ", - new String[] { "仮", "名", "遣", "い", "カタカナ" }, - new String[] { "", "", "", "", "" }); - } - - public void testCombiningMarks() throws Exception { - checkOneTerm(a, "ざ", "ざ"); // hiragana - checkOneTerm(a, "ザ", "ザ"); // katakana - checkOneTerm(a, "壹゙", "壹゙"); // ideographic - checkOneTerm(a, "아゙", "아゙"); // hangul - } - - /** @deprecated remove this and sophisticated backwards layer in 5.0 */ - @Deprecated - public void testCombiningMarksBackwards() throws Exception { - Analyzer a = new StandardAnalyzer(Version.LUCENE_33); - checkOneTerm(a, "ざ", "さ"); // hiragana Bug - checkOneTerm(a, "ザ", "ザ"); // katakana Works - checkOneTerm(a, "壹゙", "壹"); // ideographic Bug - checkOneTerm(a, "아゙", "아゙"); // hangul Works - } - - /** blast some random strings through the analyzer */ - public void testRandomStrings() throws Exception { - checkRandomData(random, new StandardAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); - } -}
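
The deleted tests all funnel through BaseTokenStreamTestCase.assertAnalyzesTo, which hides the actual token-walking loop. For readers without the test harness at hand, here is a minimal standalone sketch of the same checks on terms, offsets, and types. It is not part of the deleted file: the class name StandardAnalyzerDemo is invented, and it assumes only the Lucene 3.4 core jar on the classpath. Like the test's anonymous ReusableAnalyzerBase, it consumes a bare StandardTokenizer (no stop-word or lower-case filters), so case is preserved.

  import java.io.StringReader;

  import org.apache.lucene.analysis.standard.StandardTokenizer;
  import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
  import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
  import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
  import org.apache.lucene.util.Version;

  public class StandardAnalyzerDemo {
    public static void main(String[] args) throws Exception {
      // Bare StandardTokenizer, mirroring the anonymous analyzer in the test.
      StandardTokenizer ts = new StandardTokenizer(Version.LUCENE_34,
          new StringReader("David has 5000 bones"));
      CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
      OffsetAttribute offset = ts.addAttribute(OffsetAttribute.class);
      TypeAttribute type = ts.addAttribute(TypeAttribute.class);
      ts.reset();
      while (ts.incrementToken()) {
        // Expected, per testOffsets/testTypes: David [0,5) <ALPHANUM>,
        // has [6,9) <ALPHANUM>, 5000 [10,14) <NUM>, bones [15,20) <ALPHANUM>
        System.out.println(term.toString() + " [" + offset.startOffset() + ","
            + offset.endOffset() + ") " + type.type());
      }
      ts.end();
      ts.close();
    }
  }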