package org.apache.lucene.search;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.*;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.English;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.SortedSet;
public class TestTermVectors extends LuceneTestCase {
  private IndexSearcher searcher;
  private IndexReader reader;
  private Directory directory;
  @Override
  public void setUp() throws Exception {
    super.setUp();
    directory = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random, directory,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.SIMPLE, true))
            .setMergePolicy(newLogMergePolicy()));
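
    // Cycle the four vector flavors: i % 6 == 0 stores positions + offsets,
    // other even docs store positions only, other multiples of three store
    // offsets only, and the rest store plain term vectors.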
    for (int i = 0; i < 1000; i++) {
      Document doc = new Document();
      Field.TermVector termVector;
      int mod3 = i % 3;
      int mod2 = i % 2;
      if (mod2 == 0 && mod3 == 0) {
        termVector = Field.TermVector.WITH_POSITIONS_OFFSETS;
      } else if (mod2 == 0) {
        termVector = Field.TermVector.WITH_POSITIONS;
      } else if (mod3 == 0) {
        termVector = Field.TermVector.WITH_OFFSETS;
      } else {
        termVector = Field.TermVector.YES;
      }
      doc.add(new Field("field", English.intToEnglish(i),
          Field.Store.YES, Field.Index.ANALYZED, termVector));
      writer.addDocument(doc);
    }
    reader = writer.getReader();
    writer.close();
    searcher = newSearcher(reader);
  }

  @Override
  public void tearDown() throws Exception {
    searcher.close();
    reader.close();
    directory.close();
    super.tearDown();
  }

  public void test() {
    assertNotNull(searcher);
  }
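
  // "seventy" occurs in 100 of the 1000 docs (70-79 plus x70-x79 in each
  // hundred block); every matching doc must expose exactly one term vector.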
  public void testTermVectors() {
    Query query = new TermQuery(new Term("field", "seventy"));
    try {
      ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
      assertEquals(100, hits.length);

      for (int i = 0; i < hits.length; i++) {
        TermFreqVector[] vector = searcher.reader.getTermFreqVectors(hits[i].doc);
        assertNotNull(vector);
        assertEquals(1, vector.length);
      }
    } catch (IOException e) {
      fail(e.toString());
    }
  }
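
  // Fields are added to the document as c, a, b, x, but the reader returns
  // term vectors sorted by field name.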
  public void testTermVectorsFieldOrder() throws IOException {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random, dir, new MockAnalyzer(random, MockTokenizer.SIMPLE, true));
    Document doc = new Document();
    doc.add(new Field("c", "some content here", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
    doc.add(new Field("a", "some content here", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
    doc.add(new Field("b", "some content here", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
    doc.add(new Field("x", "some content here", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
    writer.addDocument(doc);
    IndexReader reader = writer.getReader();
    writer.close();
    TermFreqVector[] v = reader.getTermFreqVectors(0);
    assertEquals(4, v.length);
    String[] expectedFields = new String[] {"a", "b", "c", "x"};
    // terms come back sorted (content, here, some); their original token
    // positions in "some content here" are 1, 2, 0
    int[] expectedPositions = new int[] {1, 2, 0};
    for (int i = 0; i < v.length; i++) {
      TermPositionVector posVec = (TermPositionVector) v[i];
      assertEquals(expectedFields[i], posVec.getField());
      String[] terms = posVec.getTerms();
      assertEquals(3, terms.length);
      assertEquals("content", terms[0]);
      assertEquals("here", terms[1]);
      assertEquals("some", terms[2]);
      for (int j = 0; j < 3; j++) {
        int[] positions = posVec.getTermPositions(j);
        assertEquals(1, positions.length);
        assertEquals(expectedPositions[j], positions[0]);
      }
    }
    reader.close();
    dir.close();
  }
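
  // "zero" matches only doc 0; since 0 % 2 == 0 and 0 % 3 == 0, that doc
  // stores positions and offsets, so its vector must be a TermPositionVector.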
  public void testTermPositionVectors() throws IOException {
    Query query = new TermQuery(new Term("field", "zero"));
    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
    assertEquals(1, hits.length);

    for (int i = 0; i < hits.length; i++) {
      TermFreqVector[] vector = searcher.reader.getTermFreqVectors(hits[i].doc);
      assertNotNull(vector);
      assertEquals(1, vector.length);

      boolean shouldBePosVector = hits[i].doc % 2 == 0;
      assertTrue(!shouldBePosVector || vector[0] instanceof TermPositionVector);

      boolean shouldBeOffVector = hits[i].doc % 3 == 0;
      assertTrue(!shouldBeOffVector || vector[0] instanceof TermPositionVector);

      if (shouldBePosVector || shouldBeOffVector) {
        TermPositionVector posVec = (TermPositionVector) vector[0];
        String[] terms = posVec.getTerms();
        assertTrue(terms != null && terms.length > 0);

        for (int j = 0; j < terms.length; j++) {
          int[] positions = posVec.getTermPositions(j);
          TermVectorOffsetInfo[] offsets = posVec.getOffsets(j);
          if (shouldBePosVector) {
            assertNotNull(positions);
            assertTrue(positions.length > 0);
          } else {
            assertNull(positions);
          }
          if (shouldBeOffVector) {
            assertNotNull(offsets);
            assertTrue(offsets.length > 0);
          } else {
            assertNull(offsets);
          }
        }
      } else {
        // neither positions nor offsets were stored, so the cast must fail
        try {
          ((TermPositionVector) vector[0]).getTerms();
          fail("expected a ClassCastException for a freq-only vector");
        } catch (ClassCastException ignore) {
          TermFreqVector freqVec = vector[0];
          String[] terms = freqVec.getTerms();
          assertTrue(terms != null && terms.length > 0);
        }
      }
    }
  }
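
  // "fifty" matches 100 docs (50-59 plus x50-x59 in each hundred block); only
  // the presence of a single stored vector per hit is checked here.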
  public void testTermOffsetVectors() {
    Query query = new TermQuery(new Term("field", "fifty"));
    try {
      ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
      assertEquals(100, hits.length);

      for (int i = 0; i < hits.length; i++) {
        TermFreqVector[] vector = searcher.reader.getTermFreqVectors(hits[i].doc);
        assertNotNull(vector);
        assertEquals(1, vector.length);
      }
    } catch (IOException e) {
      fail(e.toString());
    }
  }
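
  // Indexes four small docs with known text and cross-checks every stored
  // term vector against the postings via TermEnum/TermDocs.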
  public void testKnownSetOfDocuments() throws IOException {
    String test1 = "eating chocolate in a computer lab"; //6 terms
    String test2 = "computer in a computer lab"; //5 terms
    String test3 = "a chocolate lab grows old"; //5 terms
    String test4 = "eating chocolate with a chocolate lab in an old chocolate colored computer lab"; //13 terms
    Map<String,Integer> test4Map = new HashMap<String,Integer>();
    test4Map.put("chocolate", Integer.valueOf(3));
    test4Map.put("lab", Integer.valueOf(2));
    test4Map.put("eating", Integer.valueOf(1));
    test4Map.put("computer", Integer.valueOf(1));
    test4Map.put("with", Integer.valueOf(1));
    test4Map.put("a", Integer.valueOf(1));
    test4Map.put("colored", Integer.valueOf(1));
    test4Map.put("in", Integer.valueOf(1));
    test4Map.put("an", Integer.valueOf(1));
    test4Map.put("old", Integer.valueOf(1));

    Document testDoc1 = new Document();
    setupDoc(testDoc1, test1);
    Document testDoc2 = new Document();
    setupDoc(testDoc2, test2);
    Document testDoc3 = new Document();
    setupDoc(testDoc3, test3);
    Document testDoc4 = new Document();
    setupDoc(testDoc4, test4);

    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random, dir,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.SIMPLE, true))
            .setOpenMode(OpenMode.CREATE).setMergePolicy(newLogMergePolicy()));
    writer.addDocument(testDoc1);
    writer.addDocument(testDoc2);
    writer.addDocument(testDoc3);
    writer.addDocument(testDoc4);
    IndexReader reader = writer.getReader();
    writer.close();
    IndexSearcher knownSearcher = newSearcher(reader);
    TermEnum termEnum = knownSearcher.reader.terms();
    TermDocs termDocs = knownSearcher.reader.termDocs();

    // for every term in the index, each matching doc's stored vector must
    // report the same frequency as the postings
    while (termEnum.next()) {
      Term term = termEnum.term();
      termDocs.seek(term);
      while (termDocs.next()) {
        int docId = termDocs.doc();
        int freq = termDocs.freq();
        TermFreqVector vector = knownSearcher.reader.getTermFreqVector(docId, "field");
        assertNotNull(vector);
        String[] vTerms = vector.getTerms();
        int[] freqs = vector.getTermFrequencies();
        for (int i = 0; i < vTerms.length; i++) {
          if (term.text().equals(vTerms[i])) {
            assertEquals(freq, freqs[i]);
          }
        }
      }
    }

    Query query = new TermQuery(new Term("field", "chocolate"));
    ScoreDoc[] hits = knownSearcher.search(query, null, 1000).scoreDocs;
    // doc 2 (testDoc3) ranks first because it is the shortest match
    assertEquals(3, hits.length);
    assertEquals(2, hits[0].doc);
    assertEquals(3, hits[1].doc);
    assertEquals(0, hits[2].doc);
    TermFreqVector vector = knownSearcher.reader.getTermFreqVector(hits[1].doc, "field");
    assertNotNull(vector);
    String[] terms = vector.getTerms();
    int[] freqs = vector.getTermFrequencies();
    assertNotNull(terms);
    assertEquals(10, terms.length);
    for (int i = 0; i < terms.length; i++) {
      String term = terms[i];
      int freq = freqs[i];
      assertTrue(test4.indexOf(term) != -1);
      Integer freqInt = test4Map.get(term);
      assertNotNull(freqInt);
      assertEquals(freqInt.intValue(), freq);
    }
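
    // SortedTermVectorMapper collapses the vectors of all fields into one
    // frequency-sorted set; "field" and "field2" hold identical text, so
    // every frequency is doubled.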
    SortedTermVectorMapper mapper = new SortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
    knownSearcher.reader.getTermFreqVector(hits[1].doc, mapper);
    SortedSet<TermVectorEntry> vectorEntrySet = mapper.getTermVectorEntrySet();
    assertEquals("mapper.getTermVectorEntrySet() size", 10, vectorEntrySet.size());
    TermVectorEntry last = null;
    for (final TermVectorEntry tve : vectorEntrySet) {
      if (tve != null && last != null) {
        assertTrue("terms are not properly sorted", last.getFrequency() >= tve.getFrequency());
        Integer expectedFreq = test4Map.get(tve.getTerm());
        // double the expected freq: two fields carry the exact same text
        assertEquals("frequency", 2 * expectedFreq.intValue(), tve.getFrequency());
      }
      last = tve;
    }
    FieldSortedTermVectorMapper fieldMapper = new FieldSortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
    knownSearcher.reader.getTermFreqVector(hits[1].doc, fieldMapper);
    Map<String,SortedSet<TermVectorEntry>> map = fieldMapper.getFieldToTerms();
    assertEquals("map size", 2, map.size());
    vectorEntrySet = map.get("field");
    assertNotNull("vectorEntrySet should not be null", vectorEntrySet);
    assertEquals("vectorEntrySet size", 10, vectorEntrySet.size());
    knownSearcher.close();
    reader.close();
    dir.close();
  }
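
  // Adds the same text to two fields: "field2" with positions and offsets,
  // "field" with a plain term vector.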
  private void setupDoc(Document doc, String text) {
    doc.add(new Field("field2", text, Field.Store.YES,
        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
    doc.add(new Field("field", text, Field.Store.YES,
        Field.Index.ANALYZED, Field.TermVector.YES));
  }

  // Test only a few docs having vectors
  public void testRareVectors() throws IOException {
    RandomIndexWriter writer = new RandomIndexWriter(random, directory,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.SIMPLE, true))
            .setOpenMode(OpenMode.CREATE));
    writer.w.setInfoStream(VERBOSE ? System.out : null);
    if (VERBOSE) {
      System.out.println("TEST: now add non-vectors");
    }
    for (int i = 0; i < 100; i++) {
      Document doc = new Document();
      doc.add(new Field("field", English.intToEnglish(i),
          Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
      writer.addDocument(doc);
    }
    if (VERBOSE) {
      System.out.println("TEST: now add vectors");
    }
    for (int i = 0; i < 10; i++) {
      Document doc = new Document();
      doc.add(new Field("field", English.intToEnglish(100 + i),
          Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
      writer.addDocument(doc);
    }

    if (VERBOSE) {
      System.out.println("TEST: now getReader");
    }
    IndexReader reader = writer.getReader();
    writer.close();
    searcher = newSearcher(reader);

    // only docs 100-109 contain "hundred", and only they store vectors
    Query query = new TermQuery(new Term("field", "hundred"));
    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
    assertEquals(10, hits.length);
    for (int i = 0; i < hits.length; i++) {
      TermFreqVector[] vector = searcher.reader.getTermFreqVectors(hits[i].doc);
      assertNotNull(vector);
      assertEquals(1, vector.length);
    }
    reader.close();
  }

  // In a single doc, for the same field, mix the term vectors up
  public void testMixedVectors() throws IOException {
    RandomIndexWriter writer = new RandomIndexWriter(random, directory,
        newIndexWriterConfig(TEST_VERSION_CURRENT,
            new MockAnalyzer(random, MockTokenizer.SIMPLE, true)).setOpenMode(OpenMode.CREATE));
    Document doc = new Document();
    doc.add(new Field("field", "one",
        Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
    doc.add(new Field("field", "one",
        Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.YES));
    doc.add(new Field("field", "one",
        Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS));
    doc.add(new Field("field", "one",
        Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_OFFSETS));
    doc.add(new Field("field", "one",
        Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
    writer.addDocument(doc);
    IndexReader reader = writer.getReader();
    writer.close();
    searcher = newSearcher(reader);

    Query query = new TermQuery(new Term("field", "one"));
    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
    assertEquals(1, hits.length);

    // the field is upgraded to the most expressive flavor seen, so all five
    // values get positions and offsets
    TermFreqVector[] vector = searcher.reader.getTermFreqVectors(hits[0].doc);
    assertNotNull(vector);
    assertEquals(1, vector.length);
    TermPositionVector tfv = (TermPositionVector) vector[0];
    assertEquals("field", tfv.getField());
    String[] terms = tfv.getTerms();
    assertEquals(1, terms.length);
    assertEquals("one", terms[0]);
    assertEquals(5, tfv.getTermFrequencies()[0]);

    int[] positions = tfv.getTermPositions(0);
    assertEquals(5, positions.length);
    for (int i = 0; i < 5; i++) {
      assertEquals(i, positions[i]);
    }
    TermVectorOffsetInfo[] offsets = tfv.getOffsets(0);
    assertEquals(5, offsets.length);
    for (int i = 0; i < 5; i++) {
      assertEquals(4 * i, offsets[i].getStartOffset());
      assertEquals(4 * i + 3, offsets[i].getEndOffset());
    }
    reader.close();
  }
}