X-Git-Url: https://git.mdrn.pl/pylucene.git/blobdiff_plain/a2e61f0c04805cfcb8706176758d1283c7e3a55c..aaeed5504b982cf3545252ab528713250aa33eed:/lucene-java-3.4.0/lucene/src/java/org/apache/lucene/search/Similarity.java

diff --git a/lucene-java-3.4.0/lucene/src/java/org/apache/lucene/search/Similarity.java b/lucene-java-3.4.0/lucene/src/java/org/apache/lucene/search/Similarity.java
deleted file mode 100644
index 5ae94dc..0000000
--- a/lucene-java-3.4.0/lucene/src/java/org/apache/lucene/search/Similarity.java
+++ /dev/null
@@ -1,921 +0,0 @@
-package org.apache.lucene.search;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.index.FieldInvertState;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.search.Explanation.IDFExplanation;
-import org.apache.lucene.util.SmallFloat;
-import org.apache.lucene.util.VirtualMethod;
-
-import java.io.IOException;
-import java.io.Serializable;
-import java.util.Collection;
-
-/**
- * Expert: Scoring API.
- *
- * Similarity defines the components of Lucene scoring.
- * Overriding computation of these components is a convenient
- * way to alter Lucene scoring.
- *
- * Suggested reading:
- * Introduction To Information Retrieval, Chapter 6.
- *
- * The following describes how Lucene scoring evolves from
- * underlying information retrieval models to (efficient) implementation.
- * We first give a brief overview of the VSM Score,
- * then derive from it Lucene's Conceptual Scoring Formula,
- * from which, finally, evolves Lucene's Practical Scoring Function
- * (the latter is connected directly with Lucene classes and methods).
- *
- * Lucene combines the
- * Boolean model (BM) of Information Retrieval
- * with the
- * Vector Space Model (VSM) of Information Retrieval -
- * documents "approved" by BM are scored by VSM.
- *
- * In VSM, documents and queries are represented as
- * weighted vectors in a multi-dimensional space,
- * where each distinct index term is a dimension,
- * and weights are Tf-idf values.
- *
- * VSM does not require weights to be Tf-idf values,
- * but Tf-idf values are believed to produce search results of high quality,
- * and so Lucene uses Tf-idf.
- * Tf and Idf are described in more detail below,
- * but for now, for completeness, let's just say that
- * for a given term t and document (or query) x,
- * Tf(t,x) varies with the number of occurrences of term t in x
- * (when one increases so does the other) and
- * idf(t) similarly varies with the inverse of the
- * number of index documents containing term t.
- *
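As a concrete illustration of the tf-idf weighting sketched above, the snippet below computes a single term weight in the spirit of DefaultSimilarity (square-root tf, smoothed logarithmic idf). The class name, helper method and numbers are invented for illustration and are not part of this file.

    // Illustrative only: a tf-idf weight in the spirit of DefaultSimilarity.
    // tf(t,d) = sqrt(freq), idf(t) = 1 + ln(numDocs / (docFreq + 1)).
    public final class TfIdfSketch {
      static float weight(int freqInDoc, int docFreq, int numDocs) {
        float tf  = (float) Math.sqrt(freqInDoc);                               // grows with occurrences
        float idf = (float) (Math.log(numDocs / (double) (docFreq + 1)) + 1.0); // shrinks as the term gets common
        return tf * idf;
      }

      public static void main(String[] args) {
        // A term occurring 4 times in a document, present in 10 of 1000 documents:
        System.out.println(weight(4, 10, 1000)); // 2 * (1 + ln(90.9)) ~ 11.0
      }
    }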
- * VSM score of document d for query q is the
- * Cosine Similarity
- * of the weighted query vectors V(q) and V(d):
- *
- *     cosine-similarity(q,d)  =  V(q) · V(d) / ( |V(q)| · |V(d)| )
- *
- *                        VSM Score
- *
- * Where V(q) · V(d) is the dot product of the weighted vectors,
- * and |V(q)| and |V(d)| are their Euclidean norms.
- *
- * Note: the above equation can be viewed as the dot product
- * of the normalized weighted vectors, in the sense that dividing
- * V(q) by its Euclidean norm is normalizing it to a unit vector.
- *
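A minimal sketch of that cosine similarity over sparse term-weight vectors follows; the Map-based representation, class name and sample weights are illustrative and not Lucene's internal data structures.

    import java.util.HashMap;
    import java.util.Map;

    // Illustrative only: cosine similarity of two sparse term-weight vectors,
    // V(q) and V(d), as in the VSM formula above.
    final class CosineSketch {
      static float cosine(Map<String, Float> vq, Map<String, Float> vd) {
        float dot = 0f, normQ = 0f, normD = 0f;
        for (Map.Entry<String, Float> e : vq.entrySet()) {
          normQ += e.getValue() * e.getValue();
          Float w = vd.get(e.getKey());
          if (w != null) dot += e.getValue() * w;   // only shared dimensions contribute
        }
        for (float w : vd.values()) normD += w * w;
        return (float) (dot / (Math.sqrt(normQ) * Math.sqrt(normD)));
      }

      public static void main(String[] args) {
        Map<String, Float> q = new HashMap<String, Float>();
        q.put("lucene", 1.2f); q.put("scoring", 0.8f);
        Map<String, Float> d = new HashMap<String, Float>();
        d.put("lucene", 2.0f); d.put("scoring", 0.5f); d.put("api", 1.0f);
        System.out.println(cosine(q, d));           // 1.0 only if the vectors are parallel
      }
    }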
- * Lucene refines the VSM score for both search quality and usability:
- * normalizing V(d) by a document length normalization factor doc-len-norm(d)
- * rather than by its full Euclidean norm, applying an index-time document boost
- * doc-boost(d), applying a search-time query boost query-boost(q), and
- * rewarding documents that match more of the query terms through a
- * coordination factor coord-factor(q,d).
- *
- * Under the simplifying assumption of a single field in the index,
- * we get Lucene's Conceptual scoring formula:
- *
- *     score(q,d)  =  coord-factor(q,d) · query-boost(q) · ( V(q) · V(d) / |V(q)| ) · doc-len-norm(d) · doc-boost(d)
- *
- *                        Lucene Conceptual Scoring Formula
- *
- * The conceptual formula is a simplification in the sense that (1) terms and documents
- * are fielded and (2) boosts are usually per query term rather than per query.
- *
- * We now describe how Lucene implements this conceptual scoring formula, and
- * derive from it Lucene's Practical Scoring Function.
- *
- * For efficient score computation some scoring components
- * are computed and aggregated in advance:
- * the query boost and the query normalization factor are known when the search
- * starts and are folded into queryNorm(q), while the document boost, field boosts
- * and length normalization are known at indexing time and are combined and stored
- * in the index as the norm value norm(t,d) (see below).
- *
- * Lucene's Practical Scoring Function is derived from the above.
- * Each of its factors corresponds to a component of the conceptual formula:
- *
- *     score(q,d)  =  coord(q,d) · queryNorm(q) · ∑ (t in q) ( tf(t in d) · idf(t)^2 · t.getBoost() · norm(t,d) )
- *
- *                        Lucene Practical Scoring Function
- *
- * where
- *
- *     {@link org.apache.lucene.search.DefaultSimilarity#tf(float) tf(t in d)}  =  frequency^(1/2)
- *
- *     {@link org.apache.lucene.search.DefaultSimilarity#idf(int, int) idf(t)}  =  1 + log ( numDocs / (docFreq + 1) )
- *
- *     queryNorm(q)  =  {@link org.apache.lucene.search.DefaultSimilarity#queryNorm(float) queryNorm(sumOfSquaredWeights)}  =  1 / sumOfSquaredWeights^(1/2)
- *
- * The sum of squared weights (of the query terms) is computed by the query
- * {@link org.apache.lucene.search.Weight} object.  For example, a boolean query
- * computes this value as:
- *
- *     {@link org.apache.lucene.search.Weight#sumOfSquaredWeights() sumOfSquaredWeights}  =
- *         {@link org.apache.lucene.search.Query#getBoost() q.getBoost()}^2  ·  ∑ (t in q) ( idf(t) · t.getBoost() )^2
- *
- * When a document is added to the index, all the above factors are multiplied.
- * If the document has multiple fields with the same name, all their boosts are multiplied together:
- *
- *     norm(t,d)  =  {@link org.apache.lucene.document.Document#getBoost() doc.getBoost()}
- *                   ·  lengthNorm
- *                   ·  ∏ (field f in d named as t) {@link org.apache.lucene.document.Fieldable#getBoost() f.getBoost}()
- *
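To make the practical formula concrete, here is a small worked example in plain Java that mirrors DefaultSimilarity-style arithmetic for a one-term query, including the one-byte norm encoding round trip via SmallFloat that norm(t,d) goes through. The scenario numbers and class name are invented for illustration; this is a sketch, not Lucene's scoring code path.

    import org.apache.lucene.util.SmallFloat;

    // Illustrative only: the practical scoring formula for a single-term query,
    // computed by hand with DefaultSimilarity-style components.
    public final class ScoreByHand {
      public static void main(String[] args) {
        int numDocs = 1000, docFreq = 10;      // collection statistics (invented)
        int freq = 3;                          // term frequency in the matching field
        int fieldLength = 100;                 // tokens in that field
        float queryBoost = 1.0f, termBoost = 1.0f, fieldBoost = 1.0f, docBoost = 1.0f;

        float idf = (float) (Math.log(numDocs / (double) (docFreq + 1)) + 1.0);
        float tf  = (float) Math.sqrt(freq);

        // queryNorm(q) = 1 / sqrt(sumOfSquaredWeights)
        float sumOfSquaredWeights = queryBoost * queryBoost * (idf * termBoost) * (idf * termBoost);
        float queryNorm = (float) (1.0 / Math.sqrt(sumOfSquaredWeights));

        // norm(t,d) = doc.getBoost() * lengthNorm * f.getBoost(), stored as a single byte
        float lengthNorm = (float) (1.0 / Math.sqrt(fieldLength));
        float norm = docBoost * lengthNorm * fieldBoost;
        byte encoded = SmallFloat.floatToByte315(norm);          // what actually goes into the index
        float decodedNorm = SmallFloat.byte315ToFloat(encoded);  // what search sees: precision is lost here

        float coord = 1.0f;                                      // 1 of 1 query terms matched
        float score = coord * queryNorm * (tf * idf * idf * termBoost * decodedNorm);
        System.out.println("norm=" + norm + " decoded=" + decodedNorm + " score=" + score);
      }
    }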
This is initially an instance of {@link DefaultSimilarity}. - * - * @see Searcher#setSimilarity(Similarity) - * @see org.apache.lucene.index.IndexWriter#setSimilarity(Similarity) - */ - public static Similarity getDefault() { - return Similarity.defaultImpl; - } - - /** Cache of decoded bytes. */ - private static final float[] NORM_TABLE = new float[256]; - - static { - for (int i = 0; i < 256; i++) - NORM_TABLE[i] = SmallFloat.byte315ToFloat((byte)i); - } - - /** - * Decodes a normalization factor stored in an index. - * @see #decodeNormValue(byte) - * @deprecated Use {@link #decodeNormValue} instead. - */ - @Deprecated - public static float decodeNorm(byte b) { - return NORM_TABLE[b & 0xFF]; // & 0xFF maps negative bytes to positive above 127 - } - - /** Decodes a normalization factor stored in an index. - *
- * WARNING: If you override this method, you should change the default - * Similarity to your implementation with {@link Similarity#setDefault(Similarity)}. - * Otherwise, your method may not always be called, especially if you omit norms - * for some fields. - * @see #encodeNormValue(float) - */ - public float decodeNormValue(byte b) { - return NORM_TABLE[b & 0xFF]; // & 0xFF maps negative bytes to positive above 127 - } - - /** Returns a table for decoding normalization bytes. - * @see #encodeNormValue(float) - * @see #decodeNormValue(byte) - * - * @deprecated Use instance methods for encoding/decoding norm values to enable customization. - */ - @Deprecated - public static float[] getNormDecoder() { - return NORM_TABLE; - } - - /** - * Computes the normalization value for a field, given the accumulated - * state of term processing for this field (see {@link FieldInvertState}). - * - *
Implementations should calculate a float value based on the field - * state and then return that value. - * - *
- * Matches in longer fields are less precise, so implementations of this
- * method usually return smaller values when state.getLength() is large,
- * and larger values when state.getLength() is small.
- *
Note that the return values are computed under - * {@link org.apache.lucene.index.IndexWriter#addDocument(org.apache.lucene.document.Document)} - * and then stored using - * {@link #encodeNormValue(float)}. - * Thus they have limited precision, and documents - * must be re-indexed if this method is altered. - * - *
- * For backward compatibility this method by default calls
- * {@link #lengthNorm(String, int)} passing
- * {@link FieldInvertState#getLength()} as the second argument, and
- * then multiplies this value by {@link FieldInvertState#getBoost()}.
- *
- * @lucene.experimental
- *
- * @param field field name
- * @param state current processing state for this field
- * @return the calculated float norm
- */
- public abstract float computeNorm(String field, FieldInvertState state);
-
- /** Computes the normalization value for a field given the total number of
-  * terms contained in a field.  These values, together with field boosts, are
-  * stored in an index and multiplied into scores for hits on each field by the
-  * search code.
-  *
-  * Matches in longer fields are less precise, so implementations of this
-  * method usually return smaller values when numTokens is large,
-  * and larger values when numTokens is small.
-  *
Note that the return values are computed under - * {@link org.apache.lucene.index.IndexWriter#addDocument(org.apache.lucene.document.Document)} - * and then stored using - * {@link #encodeNormValue(float)}. - * Thus they have limited precision, and documents - * must be re-indexed if this method is altered. - * - * @param fieldName the name of the field - * @param numTokens the total number of tokens contained in fields named - * fieldName of doc. - * @return a normalization factor for hits on this field of this document - * - * @see org.apache.lucene.document.Field#setBoost(float) - * - * @deprecated Please override computeNorm instead - */ - @Deprecated - public final float lengthNorm(String fieldName, int numTokens) { - throw new UnsupportedOperationException("please use computeNorm instead"); - } - - /** Computes the normalization value for a query given the sum of the squared - * weights of each of the query terms. This value is multiplied into the - * weight of each query term. While the classic query normalization factor is - * computed as 1/sqrt(sumOfSquaredWeights), other implementations might - * completely ignore sumOfSquaredWeights (ie return 1). - * - *
This does not affect ranking, but the default implementation does make scores - * from different queries more comparable than they would be by eliminating the - * magnitude of the Query vector as a factor in the score. - * - * @param sumOfSquaredWeights the sum of the squares of query term weights - * @return a normalization factor for query weights - */ - public abstract float queryNorm(float sumOfSquaredWeights); - - /** Encodes a normalization factor for storage in an index. - * - *
The encoding uses a three-bit mantissa, a five-bit exponent, and - * the zero-exponent point at 15, thus - * representing values from around 7x10^9 to 2x10^-9 with about one - * significant decimal digit of accuracy. Zero is also represented. - * Negative numbers are rounded up to zero. Values too large to represent - * are rounded down to the largest representable value. Positive values too - * small to represent are rounded up to the smallest positive representable - * value. - *
- * WARNING: If you override this method, you should change the default
- * Similarity to your implementation with {@link Similarity#setDefault(Similarity)}.
- * Otherwise, your method may not always be called, especially if you omit norms
- * for some fields.
- * @see org.apache.lucene.document.Field#setBoost(float)
- * @see org.apache.lucene.util.SmallFloat
- */
- public byte encodeNormValue(float f) {
-   return SmallFloat.floatToByte315(f);
- }
-
- /**
-  * Static accessor kept for backwards compatibility reasons; use encodeNormValue instead.
-  * @param f norm-value to encode
-  * @return byte representing the given float
-  * @deprecated Use {@link #encodeNormValue} instead.
-  *
-  * @see #encodeNormValue(float)
-  */
- @Deprecated
- public static byte encodeNorm(float f) {
-   return SmallFloat.floatToByte315(f);
- }
-
- /** Computes a score factor based on a term or phrase's frequency in a
-  * document.  This value is multiplied by the {@link #idf(int, int)}
-  * factor for each term in the query and these products are then summed to
-  * form the initial score for a document.
-  *
- * Terms and phrases repeated in a document indicate the topic of the
- * document, so implementations of this method usually return larger values
- * when freq is large, and smaller values when freq is small.
- *
The default implementation calls {@link #tf(float)}. - * - * @param freq the frequency of a term within a document - * @return a score factor based on a term's within-document frequency - */ - public float tf(int freq) { - return tf((float)freq); - } - - /** Computes the amount of a sloppy phrase match, based on an edit distance. - * This value is summed for each sloppy phrase match in a document to form - * the frequency that is passed to {@link #tf(float)}. - * - *
A phrase match with a small edit distance to a document passage more - * closely matches the document, so implementations of this method usually - * return larger values when the edit distance is small and smaller values - * when it is large. - * - * @see PhraseQuery#setSlop(int) - * @param distance the edit distance of this sloppy phrase match - * @return the frequency increment for this match - */ - public abstract float sloppyFreq(int distance); - - /** Computes a score factor based on a term or phrase's frequency in a - * document. This value is multiplied by the {@link #idf(int, int)} - * factor for each term in the query and these products are then summed to - * form the initial score for a document. - * - *
- * Terms and phrases repeated in a document indicate the topic of the
- * document, so implementations of this method usually return larger values
- * when freq is large, and smaller values when freq is small.
- *
- * @param freq the frequency of a term within a document
- * @return a score factor based on a term's within-document frequency
- */
- public abstract float tf(float freq);
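As a sketch of how these hooks are typically customized (assuming the Lucene 3.4 APIs declared in this file; the class name and particular formulas are illustrative choices, not something shipped with Lucene), a subclass of DefaultSimilarity might dampen term frequency and switch off length normalization. Because norms are baked in at indexing time, documents must be re-indexed after changing computeNorm.

    import org.apache.lucene.index.FieldInvertState;
    import org.apache.lucene.search.DefaultSimilarity;

    // Illustrative only: altering scoring by overriding Similarity components.
    public class FlatterSimilarity extends DefaultSimilarity {
      @Override
      public float tf(float freq) {
        // Sublinear tf: repeated terms still help, but far less than sqrt(freq).
        return freq > 0 ? 1.0f + (float) Math.log(freq) : 0.0f;
      }

      @Override
      public float computeNorm(String field, FieldInvertState state) {
        // Ignore field length entirely; keep only the index-time field boost.
        return state.getBoost();
      }
    }

    // Usage: configure the same Similarity on both the indexing side and the
    // searcher, e.g. searcher.setSimilarity(new FlatterSimilarity());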
-
-
- /**
- * Computes a score factor for a simple term and returns an explanation
- * for that score factor.
- *
- *
- * The default implementation uses: - * - *
- * idf(searcher.docFreq(term), searcher.maxDoc()); - *- * - * Note that {@link Searcher#maxDoc()} is used instead of - * {@link org.apache.lucene.index.IndexReader#numDocs() IndexReader#numDocs()} because also - * {@link Searcher#docFreq(Term)} is used, and when the latter - * is inaccurate, so is {@link Searcher#maxDoc()}, and in the same direction. - * In addition, {@link Searcher#maxDoc()} is more efficient to compute - * - * @param term the term in question - * @param searcher the document collection being searched - * @return an IDFExplain object that includes both an idf score factor - and an explanation for the term. - * @throws IOException - */ - public IDFExplanation idfExplain(final Term term, final Searcher searcher, int docFreq) throws IOException { - - if (!hasIDFExplainWithDocFreqAPI) { - // Fallback to slow impl - return idfExplain(term, searcher); - } - final int df = docFreq; - final int max = searcher.maxDoc(); - final float idf = idf(df, max); - return new IDFExplanation() { - @Override - public String explain() { - return "idf(docFreq=" + df + - ", maxDocs=" + max + ")"; - } - @Override - public float getIdf() { - return idf; - }}; - } - - /** - * This method forwards to {@link - * #idfExplain(Term,Searcher,int)} by passing - *
searcher.docFreq(term)
as the docFreq.
- *
- * WARNING: if you subclass Similarity and override this
- * method then you may incur a performance hit for certain
- * queries.  Better to override {@link
- * #idfExplain(Term,Searcher,int)} instead.
- */
- public IDFExplanation idfExplain(final Term term, final Searcher searcher) throws IOException {
- return idfExplain(term, searcher, searcher.docFreq(term));
- }
-
- /**
- * Computes a score factor for a phrase.
- *
- *
- * The default implementation sums the idf factor for
- * each term in the phrase.
- *
- * @param terms the terms in the phrase
- * @param searcher the document collection being searched
- * @return an IDFExplain object that includes both an idf
- * score factor for the phrase and an explanation
- * for each term.
- * @throws IOException
- */
- public IDFExplanation idfExplain(Collection<Term> terms, Searcher searcher) throws IOException {
-
- /** Terms that occur in fewer documents are better indicators of topic, so
- * implementations of this method usually return larger values for rare terms,
- * and smaller values for common terms.
- *
- * @param docFreq the number of documents which contain the term
- * @param numDocs the total number of documents in the collection
- * @return a score factor based on the term's document frequency
- */
- public abstract float idf(int docFreq, int numDocs);
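For reference, DefaultSimilarity implements this contract with the smoothed logarithm used in the formula section above, so rarer terms yield larger factors. The sketch below shows that shape next to a hypothetical flatter alternative an implementation might choose; the class and the capped variant are invented for illustration.

    // DefaultSimilarity-style idf, plus a hypothetical flatter variant for comparison.
    public final class IdfSketch {
      // 1 + ln(numDocs / (docFreq + 1)): rare terms score high, ubiquitous terms near 1.
      static float classicIdf(int docFreq, int numDocs) {
        return (float) (Math.log(numDocs / (double) (docFreq + 1)) + 1.0);
      }

      // Hypothetical alternative: cap the influence of very rare terms.
      static float cappedIdf(int docFreq, int numDocs) {
        return Math.min(3.0f, classicIdf(docFreq, numDocs));
      }

      public static void main(String[] args) {
        System.out.println(classicIdf(1, 1000000));      // very rare term -> ~14.1
        System.out.println(classicIdf(500000, 1000000)); // common term   -> ~1.7
      }
    }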
-
- /** Computes a score factor based on the fraction of all query terms that a
- * document contains. This value is multiplied into scores.
- *
- * The presence of a large portion of the query terms indicates a better
- * match with the query, so implementations of this method usually return
- * larger values when the ratio between these parameters is large and smaller
- * values when the ratio between them is small.
- *
- * @param overlap the number of query terms matched in the document
- * @param maxOverlap the total number of terms in the query
- * @return a score factor based on term overlap with the query
- */
- public abstract float coord(int overlap, int maxOverlap);
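A common customization of this hook is simply disabling coordination, so that matching two of three query terms is not penalized relative to matching all three. A minimal sketch (subclass name invented):

    import org.apache.lucene.search.DefaultSimilarity;

    // Illustrative only: DefaultSimilarity returns overlap / (float) maxOverlap here;
    // this variant switches coordination off entirely.
    public class NoCoordSimilarity extends DefaultSimilarity {
      @Override
      public float coord(int overlap, int maxOverlap) {
        return 1.0f;   // every "approved" document gets the same coordination factor
      }
    }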
-
- /**
- * Calculate a scoring factor based on the data in the payload. Overriding implementations
- * are responsible for interpreting what is in the payload. Lucene makes no assumptions about
- * what is in the byte array.
- *
- * The default implementation returns 1.
- *
- * @param docId The docId currently being scored. If this value is {@link #NO_DOC_ID_PROVIDED}, then it should be assumed that the PayloadQuery implementation does not provide document information
- * @param fieldName The fieldName of the term this payload belongs to
- * @param start The start position of the payload
- * @param end The end position of the payload
- * @param payload The payload byte array to be scored
- * @param offset The offset into the payload array
- * @param length The length in the array
- * @return An implementation dependent float to be used as a scoring factor
- *
- */
- public float scorePayload(int docId, String fieldName, int start, int end, byte [] payload, int offset, int length)
- {
- return 1;
- }
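When the analysis chain stores a per-position score as a four-byte big-endian float in the payload, an override can decode it and return it directly as the factor. A minimal sketch under that assumption (the subclass name and payload layout are assumptions of this example, not part of this file):

    import org.apache.lucene.search.DefaultSimilarity;

    // Illustrative only: interpret the first four payload bytes as a big-endian float
    // and use it as the scoring factor; fall back to 1 when no payload is present.
    public class FloatPayloadSimilarity extends DefaultSimilarity {
      @Override
      public float scorePayload(int docId, String fieldName, int start, int end,
                                byte[] payload, int offset, int length) {
        if (payload == null || length < 4) {
          return 1.0f;                       // behave like the default when there is nothing to read
        }
        int bits = ((payload[offset]     & 0xFF) << 24)
                 | ((payload[offset + 1] & 0xFF) << 16)
                 | ((payload[offset + 2] & 0xFF) << 8)
                 |  (payload[offset + 3] & 0xFF);
        return Float.intBitsToFloat(bits);
      }
    }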
-
-}