diff options
author | Adrien Grand <jpountz@gmail.com> | 2017-06-22 12:35:33 +0200 |
---|---|---|
committer | GitHub <noreply@github.com> | 2017-06-22 12:35:33 +0200 |
commit | 44e9c0b9473ffa8ce536953ed886988c7bffc95f (patch) | |
tree | 5666f433b35ada8375dee3c47255d891162e2076 /core/src/main/java | |
parent | a9775690858e2272f0f00f917ae1d7c25753909b (diff) |
Upgrade to lucene-7.0.0-snapshot-ad2cb77. (#25349)
Most notable changes:
- better update concurrency: LUCENE-7868
- TopDocs.totalHits is now a long: LUCENE-7872
- QueryBuilder does not remove the boolean query around multi-term synonyms:
LUCENE-7878
- removal of Fields: LUCENE-7500
For the `TopDocs.totalHits` change, this PR relies on the fact that the encodings
of vInts and vLongs are compatible: you can write with one and read with the other
as long as the value can be represented by a positive int.
Diffstat (limited to 'core/src/main/java')
7 files changed, 17 insertions, 20 deletions
diff --git a/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java index 52550f1ba6..10adf530b1 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java @@ -272,7 +272,7 @@ public class Lucene { public static TopDocs readTopDocs(StreamInput in) throws IOException { byte type = in.readByte(); if (type == 0) { - int totalHits = in.readVInt(); + long totalHits = in.readVLong(); float maxScore = in.readFloat(); ScoreDoc[] scoreDocs = new ScoreDoc[in.readVInt()]; @@ -281,7 +281,7 @@ public class Lucene { } return new TopDocs(totalHits, scoreDocs, maxScore); } else if (type == 1) { - int totalHits = in.readVInt(); + long totalHits = in.readVLong(); float maxScore = in.readFloat(); SortField[] fields = new SortField[in.readVInt()]; @@ -385,7 +385,7 @@ public class Lucene { out.writeByte((byte) 2); CollapseTopFieldDocs collapseDocs = (CollapseTopFieldDocs) topDocs; - out.writeVInt(topDocs.totalHits); + out.writeVLong(topDocs.totalHits); out.writeFloat(topDocs.getMaxScore()); out.writeString(collapseDocs.field); @@ -405,7 +405,7 @@ public class Lucene { out.writeByte((byte) 1); TopFieldDocs topFieldDocs = (TopFieldDocs) topDocs; - out.writeVInt(topDocs.totalHits); + out.writeVLong(topDocs.totalHits); out.writeFloat(topDocs.getMaxScore()); out.writeVInt(topFieldDocs.fields.length); @@ -419,7 +419,7 @@ public class Lucene { } } else { out.writeByte((byte) 0); - out.writeVInt(topDocs.totalHits); + out.writeVLong(topDocs.totalHits); out.writeFloat(topDocs.getMaxScore()); out.writeVInt(topDocs.scoreDocs.length); diff --git a/core/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java b/core/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java index ae3d978928..2b37c338c9 100644 --- 
a/core/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.lucene.uid; * under the License. */ -import org.apache.lucene.index.Fields; import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NumericDocValues; @@ -67,8 +66,7 @@ final class PerThreadIDVersionAndSeqNoLookup { */ PerThreadIDVersionAndSeqNoLookup(LeafReader reader, String uidField) throws IOException { this.uidField = uidField; - Fields fields = reader.fields(); - Terms terms = fields.terms(uidField); + Terms terms = reader.terms(uidField); if (terms == null) { throw new IllegalArgumentException("reader misses the [" + uidField + "] field"); } diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesIndexFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesIndexFieldData.java index eab98040bb..fa126d6813 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesIndexFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesIndexFieldData.java @@ -180,8 +180,7 @@ public class PagedBytesIndexFieldData extends AbstractIndexOrdinalsFieldData { LeafReader reader = context.reader(); Terms terms = reader.terms(getFieldName()); - Fields fields = reader.fields(); - final Terms fieldTerms = fields.terms(getFieldName()); + final Terms fieldTerms = reader.terms(getFieldName()); if (fieldTerms instanceof FieldReader) { final Stats stats = ((FieldReader) fieldTerms).getStats(); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java index d1986f2dd2..e2cb84f03a 100644 --- 
a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java @@ -191,7 +191,7 @@ public class InternalTopHits extends InternalAggregation implements TopHits { protected int doHashCode() { int hashCode = from; hashCode = 31 * hashCode + size; - hashCode = 31 * hashCode + topDocs.totalHits; + hashCode = 31 * hashCode + Long.hashCode(topDocs.totalHits); for (int d = 0; d < topDocs.scoreDocs.length; d++) { ScoreDoc doc = topDocs.scoreDocs[d]; hashCode = 31 * hashCode + doc.doc; diff --git a/core/src/main/java/org/elasticsearch/search/internal/ScrollContext.java b/core/src/main/java/org/elasticsearch/search/internal/ScrollContext.java index 163dbcc73d..75d48d5d63 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ScrollContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ScrollContext.java @@ -30,7 +30,7 @@ public final class ScrollContext { private Map<String, Object> context = null; - public int totalHits = -1; + public long totalHits = -1; public float maxScore; public ScoreDoc lastEmittedDoc; public Scroll scroll; diff --git a/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java b/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java index f071c62f12..8549f42040 100644 --- a/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java +++ b/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java @@ -56,7 +56,7 @@ public final class QuerySearchResult extends SearchPhaseResult { private ProfileShardResult profileShardResults; private boolean hasProfileResults; private boolean hasScoreDocs; - private int totalHits; + private long totalHits; private float maxScore; public QuerySearchResult() { @@ -317,7 +317,7 @@ public final class QuerySearchResult extends SearchPhaseResult { out.writeOptionalWriteable(profileShardResults); } - public 
int getTotalHits() { + public long getTotalHits() { return totalHits; } diff --git a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionFieldStats.java b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionFieldStats.java index 8b5761a7e9..c9b8356362 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionFieldStats.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionFieldStats.java @@ -20,7 +20,8 @@ package org.elasticsearch.search.suggest.completion; import com.carrotsearch.hppc.ObjectLongHashMap; -import org.apache.lucene.index.Fields; + +import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; @@ -49,14 +50,13 @@ public class CompletionFieldStats { for (LeafReaderContext atomicReaderContext : indexReader.leaves()) { LeafReader atomicReader = atomicReaderContext.reader(); try { - Fields fields = atomicReader.fields(); - for (String fieldName : fields) { - Terms terms = fields.terms(fieldName); + for (FieldInfo info : atomicReader.getFieldInfos()) { + Terms terms = atomicReader.terms(info.name); if (terms instanceof CompletionTerms) { // TODO: currently we load up the suggester for reporting its size long fstSize = ((CompletionTerms) terms).suggester().ramBytesUsed(); - if (fieldNamePatterns != null && fieldNamePatterns.length > 0 && Regex.simpleMatch(fieldNamePatterns, fieldName)) { - completionFields.addTo(fieldName, fstSize); + if (fieldNamePatterns != null && fieldNamePatterns.length > 0 && Regex.simpleMatch(fieldNamePatterns, info.name)) { + completionFields.addTo(info.name, fstSize); } sizeInBytes += fstSize; } |